| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
| stringlengths 6-112 | stringlengths 4-204 | stringlengths 1-3 | stringlengths 4-6 | stringlengths 714-810k | stringclasses 15 values |
sserrot/champion_relationships | venv/Lib/site-packages/IPython/core/shellapp.py | 1 | 17635 | # encoding: utf-8
"""
A mixin for :class:`~IPython.core.application.Application` classes that
launch InteractiveShell instances, load extensions, etc.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import glob
from itertools import chain
import os
import sys
from traitlets.config.application import boolean_flag
from traitlets.config.configurable import Configurable
from traitlets.config.loader import Config
from IPython.core.application import SYSTEM_CONFIG_DIRS, ENV_CONFIG_DIRS
from IPython.core import pylabtools
from IPython.utils.contexts import preserve_keys
from IPython.utils.path import filefind
from traitlets import (
Unicode, Instance, List, Bool, CaselessStrEnum, observe,
)
from IPython.terminal import pt_inputhooks
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
gui_keys = tuple(sorted(pt_inputhooks.backends) + sorted(pt_inputhooks.aliases))
backend_keys = sorted(pylabtools.backends.keys())
backend_keys.insert(0, 'auto')
shell_flags = {}
addflag = lambda *args: shell_flags.update(boolean_flag(*args))
addflag('autoindent', 'InteractiveShell.autoindent',
'Turn on autoindenting.', 'Turn off autoindenting.'
)
addflag('automagic', 'InteractiveShell.automagic',
"""Turn on the auto calling of magic commands. Type %%magic at the
IPython prompt for more information.""",
'Turn off the auto calling of magic commands.'
)
addflag('pdb', 'InteractiveShell.pdb',
"Enable auto calling the pdb debugger after every exception.",
"Disable auto calling the pdb debugger after every exception."
)
addflag('pprint', 'PlainTextFormatter.pprint',
"Enable auto pretty printing of results.",
"Disable auto pretty printing of results."
)
addflag('color-info', 'InteractiveShell.color_info',
"""IPython can display information about objects via a set of functions,
and optionally can use colors for this, syntax highlighting
source code and various other elements. This is on by default, but can cause
problems with some pagers. If you see such problems, you can disable the
colours.""",
"Disable using colors for info related things."
)
addflag('ignore-cwd', 'InteractiveShellApp.ignore_cwd',
"Exclude the current working directory from sys.path",
"Include the current working directory in sys.path",
)
nosep_config = Config()
nosep_config.InteractiveShell.separate_in = ''
nosep_config.InteractiveShell.separate_out = ''
nosep_config.InteractiveShell.separate_out2 = ''
shell_flags['nosep']=(nosep_config, "Eliminate all spacing between prompts.")
shell_flags['pylab'] = (
{'InteractiveShellApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""
)
shell_flags['matplotlib'] = (
{'InteractiveShellApp' : {'matplotlib' : 'auto'}},
"""Configure matplotlib for interactive use with
the default matplotlib backend."""
)
# it's possible we don't want short aliases for *all* of these:
shell_aliases = dict(
autocall='InteractiveShell.autocall',
colors='InteractiveShell.colors',
logfile='InteractiveShell.logfile',
logappend='InteractiveShell.logappend',
c='InteractiveShellApp.code_to_run',
m='InteractiveShellApp.module_to_run',
ext='InteractiveShellApp.extra_extension',
gui='InteractiveShellApp.gui',
pylab='InteractiveShellApp.pylab',
matplotlib='InteractiveShellApp.matplotlib',
)
shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
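# For example, running ``ipython --cache-size=1000`` sets
# ``InteractiveShell.cache_size`` through this table, and the ``c``/``m``
# entries back the familiar ``ipython -c 'code'`` and ``ipython -m module``
# invocations.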
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
class InteractiveShellApp(Configurable):
"""A Mixin for applications that start InteractiveShell instances.
Provides configurables for loading extensions and executing files
as part of configuring a Shell environment.
The following methods should be called by the :meth:`initialize` method
of the subclass:
- :meth:`init_path`
- :meth:`init_shell` (to be implemented by the subclass)
- :meth:`init_gui_pylab`
- :meth:`init_extensions`
- :meth:`init_code`
"""
extensions = List(Unicode(),
help="A list of dotted module names of IPython extensions to load."
).tag(config=True)
extra_extension = Unicode('',
help="dotted module name of an IPython extension to load."
).tag(config=True)
reraise_ipython_extension_failures = Bool(False,
help="Reraise exceptions encountered loading IPython extensions?",
).tag(config=True)
# Extensions that are always loaded (not configurable)
default_extensions = List(Unicode(), [u'storemagic']).tag(config=False)
hide_initial_ns = Bool(True,
help="""Should variables loaded at startup (by startup files, exec_lines, etc.)
be hidden from tools like %who?"""
).tag(config=True)
exec_files = List(Unicode(),
help="""List of files to run at IPython startup."""
).tag(config=True)
exec_PYTHONSTARTUP = Bool(True,
help="""Run the file referenced by the PYTHONSTARTUP environment
variable at IPython startup."""
).tag(config=True)
file_to_run = Unicode('',
help="""A file to be run""").tag(config=True)
exec_lines = List(Unicode(),
help="""lines of code to run at IPython startup."""
).tag(config=True)
code_to_run = Unicode('',
help="Execute the given command string."
).tag(config=True)
module_to_run = Unicode('',
help="Run the module as a script."
).tag(config=True)
gui = CaselessStrEnum(gui_keys, allow_none=True,
help="Enable GUI event loop integration with any of {0}.".format(gui_keys)
).tag(config=True)
matplotlib = CaselessStrEnum(backend_keys, allow_none=True,
help="""Configure matplotlib for interactive use with
the default matplotlib backend."""
).tag(config=True)
pylab = CaselessStrEnum(backend_keys, allow_none=True,
help="""Pre-load matplotlib and numpy for interactive use,
selecting a particular matplotlib backend and loop integration.
"""
).tag(config=True)
pylab_import_all = Bool(True,
help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
and an ``import *`` is done from numpy and pylab, when using pylab mode.
When False, pylab mode should not import any names into the user namespace.
"""
).tag(config=True)
ignore_cwd = Bool(
False,
help="""If True, IPython will not add the current working directory to sys.path.
When False, the current working directory is added to sys.path, allowing imports
of modules defined in the current directory."""
).tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
# whether interact-loop should start
interact = Bool(True)
user_ns = Instance(dict, args=None, allow_none=True)
@observe('user_ns')
def _user_ns_changed(self, change):
if self.shell is not None:
self.shell.user_ns = change['new']
self.shell.init_user_ns()
def init_path(self):
"""Add current working directory, '', to sys.path
Unlike Python's default, we insert before the first `site-packages`
or `dist-packages` directory,
so that it is after the standard library.
.. versionchanged:: 7.2
Try to insert after the standard library, instead of first.
.. versionchanged:: 8.0
Allow optionally not including the current directory in sys.path
"""
if '' in sys.path or self.ignore_cwd:
return
for idx, path in enumerate(sys.path):
parent, last_part = os.path.split(path)
if last_part in {'site-packages', 'dist-packages'}:
break
else:
# no site-packages or dist-packages found (?!)
# back to original behavior of inserting at the front
idx = 0
sys.path.insert(idx, '')
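# Illustrative example (actual paths vary by platform): if sys.path begins as
#   ['/usr/lib/python3.8', '/usr/lib/python3.8/site-packages', ...]
# the loop above stops at the first site-packages entry (idx == 1), so the
# current directory '' lands after the standard library but ahead of
# site-packages.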
def init_shell(self):
raise NotImplementedError("Override in subclasses")
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
enable = False
shell = self.shell
if self.pylab:
enable = lambda key: shell.enable_pylab(key, import_all=self.pylab_import_all)
key = self.pylab
elif self.matplotlib:
enable = shell.enable_matplotlib
key = self.matplotlib
elif self.gui:
enable = shell.enable_gui
key = self.gui
if not enable:
return
try:
r = enable(key)
except ImportError:
self.log.warning("Eventloop or matplotlib integration failed. Is matplotlib installed?")
self.shell.showtraceback()
return
except Exception:
self.log.warning("GUI event loop or pylab initialization failed")
self.shell.showtraceback()
return
if isinstance(r, tuple):
gui, backend = r[:2]
self.log.info("Enabling GUI event loop integration, "
"eventloop=%s, matplotlib=%s", gui, backend)
if key == "auto":
print("Using matplotlib backend: %s" % backend)
else:
gui = r
self.log.info("Enabling GUI event loop integration, "
"eventloop=%s", gui)
def init_extensions(self):
"""Load all IPython extensions in IPythonApp.extensions.
This uses the :meth:`ExtensionManager.load_extensions` to load all
the extensions listed in ``self.extensions``.
"""
try:
self.log.debug("Loading IPython extensions...")
extensions = self.default_extensions + self.extensions
if self.extra_extension:
extensions.append(self.extra_extension)
for ext in extensions:
try:
self.log.info("Loading IPython extension: %s" % ext)
self.shell.extension_manager.load_extension(ext)
except:
if self.reraise_ipython_extension_failures:
raise
msg = ("Error in loading extension: {ext}\n"
"Check your config files in {location}".format(
ext=ext,
location=self.profile_dir.location
))
self.log.warning(msg, exc_info=True)
except:
if self.reraise_ipython_extension_failures:
raise
self.log.warning("Unknown error in loading extensions:", exc_info=True)
def init_code(self):
"""run the pre-flight code, specified via exec_lines"""
self._run_startup_files()
self._run_exec_lines()
self._run_exec_files()
# Hide variables defined here from %who etc.
if self.hide_initial_ns:
self.shell.user_ns_hidden.update(self.shell.user_ns)
# command-line execution (ipython -i script.py, ipython -m module)
# should *not* be excluded from %whos
self._run_cmd_line_code()
self._run_module()
# flush output, so it won't be attached to the first cell
sys.stdout.flush()
sys.stderr.flush()
def _run_exec_lines(self):
"""Run lines of code in IPythonApp.exec_lines in the user's namespace."""
if not self.exec_lines:
return
try:
self.log.debug("Running code from IPythonApp.exec_lines...")
for line in self.exec_lines:
try:
self.log.info("Running code in user namespace: %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warning("Error in executing line in user "
"namespace: %s" % line)
self.shell.showtraceback()
except:
self.log.warning("Unknown error in handling IPythonApp.exec_lines:")
self.shell.showtraceback()
def _exec_file(self, fname, shell_futures=False):
try:
full_filename = filefind(fname, [u'.', self.ipython_dir])
except IOError:
self.log.warning("File not found: %r"%fname)
return
# Make sure that the running script gets a proper sys.argv as if it
# were run from a system shell.
save_argv = sys.argv
sys.argv = [full_filename] + self.extra_args[1:]
try:
if os.path.isfile(full_filename):
self.log.info("Running file in user namespace: %s" %
full_filename)
# Ensure that __file__ is always defined to match Python
# behavior.
with preserve_keys(self.shell.user_ns, '__file__'):
self.shell.user_ns['__file__'] = fname
if full_filename.endswith('.ipy') or full_filename.endswith('.ipynb'):
self.shell.safe_execfile_ipy(full_filename,
shell_futures=shell_futures)
else:
# default to python, even without extension
self.shell.safe_execfile(full_filename,
self.shell.user_ns,
shell_futures=shell_futures,
raise_exceptions=True)
finally:
sys.argv = save_argv
def _run_startup_files(self):
"""Run files from profile startup directory"""
startup_dirs = [self.profile_dir.startup_dir] + [
os.path.join(p, 'startup') for p in chain(ENV_CONFIG_DIRS, SYSTEM_CONFIG_DIRS)
]
startup_files = []
if self.exec_PYTHONSTARTUP and os.environ.get('PYTHONSTARTUP', False) and \
not (self.file_to_run or self.code_to_run or self.module_to_run):
python_startup = os.environ['PYTHONSTARTUP']
self.log.debug("Running PYTHONSTARTUP file %s...", python_startup)
try:
self._exec_file(python_startup)
except:
self.log.warning("Unknown error in handling PYTHONSTARTUP file %s:", python_startup)
self.shell.showtraceback()
for startup_dir in startup_dirs[::-1]:
startup_files += glob.glob(os.path.join(startup_dir, '*.py'))
startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
if not startup_files:
return
self.log.debug("Running startup files from %s...", startup_dir)
try:
for fname in sorted(startup_files):
self._exec_file(fname)
except:
self.log.warning("Unknown error in handling startup files:")
self.shell.showtraceback()
def _run_exec_files(self):
"""Run files from IPythonApp.exec_files"""
if not self.exec_files:
return
self.log.debug("Running files in IPythonApp.exec_files...")
try:
for fname in self.exec_files:
self._exec_file(fname)
except:
self.log.warning("Unknown error in handling IPythonApp.exec_files:")
self.shell.showtraceback()
def _run_cmd_line_code(self):
"""Run code or file specified at the command-line"""
if self.code_to_run:
line = self.code_to_run
try:
self.log.info("Running code given at command line (c=): %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warning("Error in executing line in user namespace: %s" %
line)
self.shell.showtraceback()
if not self.interact:
self.exit(1)
# Like Python itself, ignore the second if the first of these is present
elif self.file_to_run:
fname = self.file_to_run
if os.path.isdir(fname):
fname = os.path.join(fname, "__main__.py")
if not os.path.exists(fname):
self.log.warning("File '%s' doesn't exist", fname)
if not self.interact:
self.exit(2)
try:
self._exec_file(fname, shell_futures=True)
except:
self.shell.showtraceback(tb_offset=4)
if not self.interact:
self.exit(1)
def _run_module(self):
"""Run module specified at the command-line."""
if self.module_to_run:
# Make sure that the module gets a proper sys.argv as if it were
# run using `python -m`.
save_argv = sys.argv
sys.argv = [sys.executable] + self.extra_args
try:
self.shell.safe_run_module(self.module_to_run,
self.shell.user_ns)
finally:
sys.argv = save_argv
| mit |
nvoron23/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
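# For intuition: contingency_matrix([0, 0, 1, 1], [0, 0, 1, 2]) returns
#   [[2, 0, 0],
#    [0, 1, 1]]
# i.e. true class 0 sits entirely in predicted cluster 0, while true class 1
# is split between predicted clusters 1 and 2.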
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate, given knowledge of the ground
truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
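# Sanity check of the computation above: for two identical two-class labelings
# the contingency matrix is diagonal and the MI reduces to log(2), e.g.
# mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]) is approximately 0.6931.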
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float (bounded above by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI of around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
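# The max(..., 1e-10) below guards against division by zero when either
# labeling has a single class (zero entropy); mi is also 0 in that case, so
# the returned NMI is 0.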
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
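# For intuition: entropy([0, 0, 1, 1]) equals log(2) (~0.6931, two equally
# likely labels), entropy([0, 0, 0, 0]) equals 0.0 (a single label carries no
# information), and entropy([]) is defined above to return 1.0.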
| bsd-3-clause |
evanbiederstedt/RRBSfun | trees/chrom_scripts/normal_chr12.py | 1 | 25844 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # show up to 50 columns
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
totalfiles = normalB + mcell + pcell + cd19cell
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr12"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG"]
print(total_matrix.shape)
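# The steps below write the .phy output in a relaxed PHYLIP-like form: missing
# calls become '?', each cell's column is joined into one 0/1/? character
# string, and every output line has the form "<cell_name> <methylation_string>".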
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("normal_chrom12.phy", header=None, index=None)
print(tott.shape)
| mit |
ajauhri/bignum_compression | comparison_plots.py | 1 | 3868 | #! /usr/bin/env python
# Copyright (c) 2018, Abhinav Jauhri, Martin Griss, Hakan Erdogmus
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see the LICENSE file in the repo root
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import pandas
from collections import defaultdict
from pylab import *
import sys
import os
def plot_best(t, labels):
df = pandas.read_csv('results/all.csv', skiprows=[1])
big = []
golomb = []
vsimple = []
simd = []
x = []
golomb_dict = defaultdict(list)
big_dict = defaultdict(list)
vsimple_dict = defaultdict(list)
simd_dict = defaultdict(list)
for count, row in df.iterrows():
if t == 'delta_min':
arr_len = row['points'] * 2
x.append(row['points']*2)
elif t == 'delta':
arr_len = (row['points']-1) * 2
x.append(arr_len)
big.append(row['big_' + t + '_bits'])
big_dict[arr_len].append(big[-1])
golomb.append(row['golomb_' + t + '_bits'])
golomb_dict[arr_len].append(golomb[-1])
if os.path.isfile('results/' + t + '_state_of_art/' + str(count+1) + '.out'):
fd = file('results/' + t + '_state_of_art/' + str(count+1) + '.out', 'r')
for line in fd.readlines():
if 'VSimple' in line:
vsimple.append(float(line.split()[3]))
if 'SIMDPackFPF' in line:
simd.append(float(line.split()[3]))
fd.close()
vsimple_dict[arr_len].append(vsimple[-1])
simd_dict[arr_len].append(simd[-1])
else:
print 'error'
break
x = np.array(x)
big = np.array(big)
golomb = np.array(golomb)
vsimple = np.array(vsimple)
simd = np.array(simd)
ind = np.argsort(x)
x = x[ind]
big = big[ind]
golomb = golomb[ind]
simd = simd[ind]
vsimple = vsimple[ind]
colors = cm.rainbow(np.linspace(0, 1, 5))
big_l = plt.scatter(x, big, color=colors[3], alpha=1, marker='d', s=10)
golomb_l = plt.scatter(x+.4, golomb, color='green', alpha=1, marker='+', s=10)
vsimple_l = plt.scatter(x-.4, vsimple, color=colors[0], alpha=1, marker='s', s =10)
simd_l = plt.scatter(x-.8, simd, color=colors[4], alpha=1, marker='v', s=10)
plt.legend((big_l, golomb_l, vsimple_l, simd_l), labels, loc='upper left')
plt.ylim(0, 25)
plt.xlim(0, 50)
plt.xlabel('number of integers', fontsize=22)
plt.ylabel('bits/integer', fontsize=22)
plt.tight_layout()
savefig('results/best_' + t + '.png')
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.ylim(0, 25)
plt.xlim(0, 50)
plt.xlabel('number of integers', fontsize=22)
plt.ylabel('bits/integer', fontsize=22)
plt.tight_layout()
for k,v in big_dict.iteritems():
ax.errorbar(k, np.mean(big_dict[k]), np.std(big_dict[k]), linestyle='None', marker='d', color=colors[3], label=labels[0])
ax.errorbar(k+.4, np.mean(golomb_dict[k]), np.std(golomb_dict[k]), linestyle='None', marker='+', color='green', label=labels[1])
ax.errorbar(k-.4, np.mean(vsimple_dict[k]), np.std(vsimple_dict[k]), linestyle='None', marker='s', color=colors[0], label=labels[2])
ax.errorbar(k-.8, np.mean(simd_dict[k]), np.std(simd_dict[k]), linestyle='None', marker='v', color=colors[4], label=labels[3])
handles, _ = ax.get_legend_handles_labels()
handles = [h[0] for h in handles]
ax.legend(handles, labels, loc='upper left')
savefig('results/best2_' + t + '.png')
plt.clf()
plot_best('delta', (r'$BIG^{\Delta}$', r'$GOL^{\Delta}$', r'$VSimple^{\Delta}$', r'$SIMDPackFPF^{\Delta}$'))
plot_best('delta_min', (r'$BIG^{\Delta min}$', r'$GOL^{\Delta min}$', r'$VSimple^{\Delta min}$', r'$SIMDPackFPF^{\Delta min}$'))
| bsd-3-clause |
brityboy/BotBoosted | src/load_train_data.py | 1 | 8416 | import numpy as np
import pandas as pd
import csv
from collections import Counter
"""
This module is used to host the different functions that load the training
data into a single manageable dataframe.
These are a series of helper functions that extract the raw information from
the training data csv files that were put together by Stefano Cresci,
Roberto Di Pietro, Marinella Petrocchi, Angelo Spognardi, and Maurizio
Tesconi for their paper "Fame for sale: efficient detection of fake Twitter
followers." http://mib.projects.iit.cnr.it/dataset.html is the link to their
data. The main orientation of this script is to compile the different csv
files that this research team put together into one single and properly
labeled csv.
As the dataset I used for this project may not be shared, this module
only demonstrates the kind of features and transformations I did with
this dataset so that should there be anyone who wishes to pursue further
research in this area, a method already exists for collecting the data
into a single pandas dataframe
Example:
file_list = human_users+fake_users
checkdata = get_first_row_of_all_csv_files_in_a_list(file_list)
column_list = get_intersection_columns_for_different_csv_files(checkdata)
df = extract_columns_from_multiple_csvs(column_list,
file_list)
df.to_csv('data/training_users.csv')
file_list = human_tweets+fake_tweets
checkdata = get_first_row_of_all_csv_files_in_a_list(file_list)
column_list = get_intersection_columns_for_different_csv_files(checkdata)
df = extract_columns_from_multiple_csvs(column_list,
file_list)
df.to_csv('data/training_tweets.csv')
"""
ds1_genuine_tweets = 'data/datasets_full.csv/genuine_accounts.csv/tweets.csv'
ds1_genuine_users = 'data/datasets_full.csv/genuine_accounts.csv/users.csv'
ds1_sb1_tweets = 'data/datasets_full.csv/social_spambots_1.csv/tweets.csv'
ds1_sb1_users = 'data/datasets_full.csv/social_spambots_1.csv/users.csv'
ds1_sb2_tweets = 'data/datasets_full.csv/social_spambots_2.csv/tweets.csv'
ds1_sb2_users = 'data/datasets_full.csv/social_spambots_2.csv/users.csv'
ds1_sb3_tweets = 'data/datasets_full.csv/social_spambots_3.csv/tweets.csv'
ds1_sb3_users = 'data/datasets_full.csv/social_spambots_3.csv/users.csv'
ds1_ts1_tweets = 'data/datasets_full.csv/traditional_spambots_1.csv/tweets.csv'
ds1_ts1_users = 'data/datasets_full.csv/traditional_spambots_1.csv/users.csv'
ds1_ts2_tweets = 'data/datasets_full.csv/traditional_spambots_2.csv/tweets.csv'
ds1_ts2_users = 'data/datasets_full.csv/traditional_spambots_2.csv/users.csv'
ds1_ts3_tweets = 'data/datasets_full.csv/traditional_spambots_3.csv/tweets.csv'
ds1_ts3_users = 'data/datasets_full.csv/traditional_spambots_3.csv/users.csv'
ds1_ts4_tweets = 'data/datasets_full.csv/traditional_spambots_4.csv/tweets.csv'
ds1_ts4_users = 'data/datasets_full.csv/traditional_spambots_4.csv/users.csv'
ds1_ff_tweets = 'data/datasets_full.csv/fake_followers.csv/tweets.csv'
ds1_ff_users = 'data/datasets_full.csv/fake_followers.csv/users.csv'
ds2_tfp_tweets = 'data/TFP.csv/tweets.csv'
ds2_tfp_users = 'data/TFP.csv/users.csv'
ds2_e13_tweets = 'data/E13.csv/tweets.csv'
ds2_e13_users = 'data/E13.csv/users.csv'
ds2_fsf_tweets = 'data/FSF.csv/tweets.csv'
ds2_fsf_users = 'data/FSF.csv/users.csv'
ds2_int_tweets = 'data/INT.csv/tweets.csv'
ds2_int_users = 'data/INT.csv/users.csv'
ds2_twt_tweets = 'data/TWT.csv/tweets.csv'
ds2_twt_users = 'data/TWT.csv/users.csv'
human_tweets = [ds1_genuine_tweets, ds2_e13_tweets, ds2_tfp_tweets]
fake_tweets = [ds1_sb1_tweets, ds1_sb2_tweets, ds1_sb3_tweets,
ds1_ts1_tweets, ds2_fsf_tweets, ds2_int_tweets,
ds2_twt_tweets]
human_users = [ds1_genuine_users, ds2_e13_users, ds2_tfp_users]
fake_users = [ds1_sb1_users, ds1_sb2_users, ds1_sb3_users, ds1_ts1_users,
ds2_fsf_users, ds2_int_users, ds2_twt_users]
filename_dict = {ds1_genuine_tweets: 'hum_tw', ds2_e13_tweets: "e13_tw",
ds2_tfp_tweets: "tfp_tw", ds1_sb1_tweets: "sb1_tw",
ds1_sb2_tweets: "sb2_tw", ds1_sb3_tweets: "sb3_tw",
ds1_ts1_tweets: "ts1_tw", ds2_fsf_tweets: "fsf_tw",
ds2_int_tweets: "int_tw", ds2_twt_tweets: "twt_tw",
ds1_genuine_users: "hum1_us", ds2_e13_users: "e13_us",
ds2_tfp_users: "tfp_us", ds1_sb1_users: "sb1_us",
ds1_sb2_users: "sb2_us", ds1_sb3_users: "sb3_us",
ds1_ts1_users: "ts1_us", ds2_fsf_users: "fsf_us",
ds2_int_users: "int_us", ds2_twt_users: "twt_us"}
label_dict = {ds1_genuine_tweets: 0, ds2_e13_tweets: 0,
ds2_tfp_tweets: 0, ds1_sb1_tweets: 1,
ds1_sb2_tweets: 1, ds1_sb3_tweets: 1,
ds1_ts1_tweets: 1, ds2_fsf_tweets: 1,
ds2_int_tweets: 1, ds2_twt_tweets: 1,
ds1_genuine_users: 0, ds2_e13_users: 0,
ds2_tfp_users: 0, ds1_sb1_users: 1,
ds1_sb2_users: 1, ds1_sb3_users: 1,
ds1_ts1_users: 1, ds2_fsf_users: 1,
ds2_int_users: 1, ds2_twt_users: 1}
def open_csv_file_as_dataframe(filename):
"""
Args:
filename (str): this is the name of the file that will be opened
as a dataframe
Returns
df (pandas DataFrame): contents of csv file, null bytes and other
items removed in a dataframe, with column headers which is in the
first row of each file
"""
text_list = []
with open(filename, 'r') as csvfile:
opencsvfile = csv.reader(x.replace('\0', '').replace('\n', '')
for x in csvfile)
for row in opencsvfile:
text_list.append(row)
columns = text_list[0]
df = pd.DataFrame(text_list[1:], columns=columns)
return df
def get_first_row_of_all_csv_files_in_a_list(file_list):
"""
Args:
file_list (list): list of csv files that will be processed
Returns:
output_dict (dictionary): where the keys are the columns of the
different text files and the values are the number of files
these columns occur in
"""
output_list = []
for file_name in file_list:
with open(file_name, 'r') as f:
first_line = f.readline()
first_line = first_line.replace('"', ''). \
replace('\n', '').replace('\r', '').split(',')
output_list += first_line
output_dict = Counter(output_list)
return output_dict
def extract_columns_from_multiple_csvs(column_list, csv_list):
"""
Args:
column_list (list): list of columns to extract from the different csvs
csv_list (list): list of the different csvs to get the data from
Returns
compiled_df (pandas DataFrame): a dataframe that has all the
columns from the different csvs
"""
compiled_df = pd.DataFrame(columns=np.append(column_list,
['file', 'label']))
for csv_file in csv_list:
print(csv_file)
df = open_csv_file_as_dataframe(csv_file)
df.columns = [c.replace('\n', '').replace('\r',
'') for c in df.columns]
df = df[column_list]
df['file'] = filename_dict[csv_file]
df['label'] = label_dict[csv_file]
compiled_df = pd.concat([compiled_df, df])
return compiled_df
def get_intersection_columns_for_different_csv_files(output_dict):
"""
Args:
output_dict (dictionary): a dictionary that has the keys as
columns and the values as the number of csv files they occur in
Returns:
column_list (list): a list of columns that has the columns which occur
in all csv files that will be loaded into a dataframe
"""
column_list = []
maxval = max(output_dict.values())
    for k, v in output_dict.items():
if v == maxval:
column_list.append(k)
return column_list
if __name__ == "__main__":
file_list = human_users+fake_users
checkdata = get_first_row_of_all_csv_files_in_a_list(file_list)
column_list = get_intersection_columns_for_different_csv_files(checkdata)
df = extract_columns_from_multiple_csvs(column_list,
file_list)
df.to_csv('data/training_users.csv')
| mit |
ky822/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
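# Curves plotted below, as functions of the margin z = y * f(x) (read off the
# code, for reference):
#   zero-one:       1 for z < 0, 0 for z > 0 (step at 0)
#   hinge:          max(0, 1 - z)
#   perceptron:     max(0, -z)
#   log loss:       log2(1 + exp(-z))
#   squared hinge:  max(0, 1 - z)**2
#   modified Huber: 0 for z >= 1, (1 - z)**2 for -1 <= z < 1, -4z for z < -1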
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
34383c/pyNeuroML | pyneuroml/analysis/NML2ChannelAnalysis.py | 1 | 23536 | #!/usr/bin/env python
#
# A script which can be run to generate a LEMS file to analyse the behaviour
# of channels in NeuroML 2
#
import sys
import os
import os.path
import argparse
import pprint
import glob
import re
import neuroml.loaders as loaders
from pyneuroml.pynml import run_lems_with_jneuroml, print_comment_v, read_neuroml2_file
import airspeed
import matplotlib.pyplot as plt
pp = pprint.PrettyPrinter(depth=4)
OUTPUT_DIR = os.getcwd()
TEMPLATE_FILE = "%s/LEMS_Test_TEMPLATE.xml" % (os.path.dirname(__file__))
HTML_TEMPLATE_FILE = "%s/ChannelInfo_TEMPLATE.html" % \
(os.path.dirname(__file__))
MD_TEMPLATE_FILE = "%s/ChannelInfo_TEMPLATE.md" % \
(os.path.dirname(__file__))
V = "rampCellPop0[0]/v" # Key for voltage trace in results dictionary.
MAX_COLOUR = (255, 0, 0)
MIN_COLOUR = (255, 255, 0)
DEFAULTS = {'v': False,
'minV': -100,
'maxV': 100,
'temperature': 6.3,
'duration': 100,
'clampDelay': 10,
'clampDuration': 80,
'clampBaseVoltage': -70,
'stepTargetVoltage': 20,
'erev': 0,
'caConc': 5e-5,
'datSuffix': '',
'ivCurve': False,
'norun': False,
'nogui': False,
'html': False,
'md': False}
def process_args():
"""
Parse command-line arguments.
"""
parser = argparse.ArgumentParser(
description=("A script which can be run to generate a LEMS "
"file to analyse the behaviour of channels in "
"NeuroML 2"))
parser.add_argument('channelFiles',
type=str,
nargs='+',
metavar='<NeuroML 2 Channel file>',
help="Name of the NeuroML 2 file(s)")
parser.add_argument('-v',
action='store_true',
default=DEFAULTS['v'],
help="Verbose output")
parser.add_argument('-minV',
type=int,
metavar='<min v>',
default=DEFAULTS['minV'],
help="Minimum voltage to test (integer, mV)")
parser.add_argument('-maxV',
type=int,
metavar='<max v>',
default=DEFAULTS['maxV'],
help="Maximum voltage to test (integer, mV)")
parser.add_argument('-temperature',
type=float,
metavar='<temperature>',
default=DEFAULTS['temperature'],
help="Temperature (float, celsius)")
parser.add_argument('-duration',
type=float,
metavar='<duration>',
default=DEFAULTS['duration'],
help="Duration of simulation in ms")
parser.add_argument('-clampDelay',
type=float,
metavar='<clamp delay>',
default=DEFAULTS['clampDelay'],
help="Delay before voltage clamp is activated in ms")
parser.add_argument('-clampDuration',
type=float,
metavar='<clamp duration>',
default=DEFAULTS['clampDuration'],
help="Duration of voltage clamp in ms")
parser.add_argument('-clampBaseVoltage',
type=float,
metavar='<clamp base voltage>',
default=DEFAULTS['clampBaseVoltage'],
help="Clamp base (starting/finishing) voltage in mV")
parser.add_argument('-stepTargetVoltage',
type=float,
metavar='<step target voltage>',
default=DEFAULTS['stepTargetVoltage'],
help=("Voltage in mV through which to step voltage "
"clamps"))
parser.add_argument('-erev',
type=float,
metavar='<reversal potential>',
default=DEFAULTS['erev'],
help="Reversal potential of channel for currents")
parser.add_argument('-caConc',
type=float,
metavar='<Ca2+ concentration>',
default=DEFAULTS['caConc'],
help=("Internal concentration of Ca2+ (float, "
"concentration in mM)"))
parser.add_argument('-datSuffix',
type=str,
metavar='<dat suffix>',
default=DEFAULTS['datSuffix'],
help="String to add to dat file names (before .dat)")
parser.add_argument('-norun',
action='store_true',
default=DEFAULTS['norun'],
help=("If used, just generate the LEMS file, "
"don't run it"))
parser.add_argument('-nogui',
action='store_true',
default=DEFAULTS['nogui'],
help=("Supress plotting of variables and only save "
"data to file"))
parser.add_argument('-html',
action='store_true',
default=DEFAULTS['html'],
help=("Generate a HTML page featuring the plots for the "
"channel"))
parser.add_argument('-md',
action='store_true',
default=DEFAULTS['md'],
help=("Generate a (GitHub flavoured) Markdown page featuring the plots for the "
"channel"))
parser.add_argument('-ivCurve',
action='store_true',
default=DEFAULTS['ivCurve'],
help=("Save currents through voltage clamp at each "
"level & plot current vs voltage for ion "
"channel"))
return parser.parse_args()
def get_colour_hex(fract):
    # Interpolate each RGB channel between MIN_COLOUR and MAX_COLOUR and format
    # it as a zero-padded two-digit hex value (slicing hex() output, as before,
    # dropped single-digit channel values such as 0x5).
    rgb = [int(x + (y - x) * fract) for x, y in zip(MIN_COLOUR, MAX_COLOUR)]
    return "#" + "".join("%02x" % c for c in rgb)
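# For example, get_colour_hex(0.0) gives '#ffff00' (MIN_COLOUR, yellow) and
# get_colour_hex(1.0) gives '#ff0000' (MAX_COLOUR, red).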
# Better off elsewhere
def get_ion_color(ion):
if ion.lower()=='na': col='#1E90FF'
elif ion.lower()=='k': col='#CD5C5C'
elif ion.lower()=='ca': col='#8FBC8F'
else: col='#A9A9A9'
return col
def get_state_color(s):
col='#000000'
if s.startswith('m'): col='#FF0000'
if s.startswith('k'): col='#FF0000'
if s.startswith('r'): col='#FF0000'
if s.startswith('h'): col='#00FF00'
if s.startswith('l'): col='#00FF00'
if s.startswith('n'): col='#0000FF'
if s.startswith('a'): col='#FF0000'
if s.startswith('b'): col='#00FF00'
if s.startswith('c'): col='#0000FF'
if s.startswith('q'): col='#FF00FF'
if s.startswith('e'): col='#00FFFF'
if s.startswith('f'): col='#DDDD00'
if s.startswith('p'): col='#880000'
if s.startswith('s'): col='#888800'
if s.startswith('u'): col='#880088'
return col
def merge_with_template(model, templfile):
if not os.path.isfile(templfile):
templfile = os.path.join(os.path.dirname(sys.argv[0]), templfile)
with open(templfile) as f:
templ = airspeed.Template(f.read())
return templ.merge(model)
def generate_lems_channel_analyser(channel_file, channel, min_target_voltage,
step_target_voltage, max_target_voltage, clamp_delay,
clamp_duration, clamp_base_voltage, duration, erev,
gates, temperature, ca_conc, iv_curve, dat_suffix=''):
print_comment_v("Generating LEMS file to investigate %s in %s, %smV->%smV, %sdegC"%(channel, \
channel_file, min_target_voltage, max_target_voltage, temperature))
target_voltages = []
v = min_target_voltage
while v <= max_target_voltage:
target_voltages.append(v)
v+=step_target_voltage
target_voltages_map = []
for t in target_voltages:
fract = float(target_voltages.index(t)) / (len(target_voltages)-1)
info = {}
info["v"] = t
info["v_str"] = str(t).replace("-", "min")
info["col"] = get_colour_hex(fract)
target_voltages_map.append(info)
includes = get_includes_from_channel_file(channel_file)
includes_relative = []
base_path = os.path.dirname(channel_file)
for inc in includes:
includes_relative.append(os.path.abspath(base_path+'/'+inc))
model = {"channel_file": channel_file,
"includes": includes_relative,
"channel": channel,
"target_voltages" : target_voltages_map,
"clamp_delay": clamp_delay,
"clamp_duration": clamp_duration,
"clamp_base_voltage": clamp_base_voltage,
"min_target_voltage": min_target_voltage,
"max_target_voltage": max_target_voltage,
"duration": duration,
"erev": erev,
"gates": gates,
"temperature": temperature,
"ca_conc": ca_conc,
"iv_curve": iv_curve,
"dat_suffix": dat_suffix}
#pp.pprint(model)
merged = merge_with_template(model, TEMPLATE_FILE)
return merged
def convert_case(name):
"""Converts from camelCase to under_score"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
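# e.g. convert_case('clampBaseVoltage') -> 'clamp_base_voltage',
#      convert_case('stepTargetVoltage') -> 'step_target_voltage'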
def get_channels_from_channel_file(channel_file):
doc = read_neuroml2_file(channel_file, include_includes=True, verbose=False, already_included=[])
channels = list(doc.ion_channel_hhs.__iter__()) + \
list(doc.ion_channel.__iter__())
for channel in channels:
setattr(channel,'file',channel_file)
if not hasattr(channel,'notes'):
setattr(channel,'notes','')
return channels
def get_includes_from_channel_file(channel_file):
doc = read_neuroml2_file(channel_file)
includes = []
for incl in doc.includes:
includes.append(incl.href)
return includes
def process_channel_file(channel_file,a):
## Get name of channel mechanism to test
if a.v:
print_comment_v("Going to test channel from file: "+ channel_file)
if not os.path.isfile(channel_file):
raise IOError("File could not be found: %s!\n" % channel_file)
channels = get_channels_from_channel_file(channel_file)
channels_info = []
for channel in channels:
if len(get_channel_gates(channel)) == 0:
print_comment_v("Skipping %s in %s as it has no channels (probably passive conductance)"%(channel.id,channel_file))
else:
new_lems_file = make_lems_file(channel,a)
if not a.norun:
results = run_lems_file(new_lems_file,a.v)
if a.iv_curve:
iv_data = compute_iv_curve(channel,(a.clamp_delay + a.clamp_duration),results)
else:
iv_data = None
if not a.nogui and not a.norun:
plot_channel(channel,a,results,iv_data=iv_data)
channel_info = {key:getattr(channel,key) for key in ['id','file','notes', 'species']}
channel_info['expression'] = get_conductance_expression(channel)
channel_info['ion_color'] = get_ion_color(channel.species)
channels_info.append(channel_info)
return channels_info
def get_channel_gates(channel):
channel_gates = []
for gates in ['gates','gate_hh_rates','gate_hh_tau_infs', 'gate_hh_instantaneouses']:
channel_gates += [g.id for g in getattr(channel,gates)]
return channel_gates
def get_conductance_expression(channel):
expr = 'g = gmax '
for gates in ['gates','gate_hh_rates','gate_hh_tau_infs', 'gate_hh_instantaneouses']:
for g in getattr(channel,gates):
instances = int(g.instances)
expr += '* %s<sup>%s</sup> '%(g.id, g.instances) if instances >1 else '* %s '%(g.id)
return expr
def make_lems_file(channel,a):
gates = get_channel_gates(channel)
lems_content = generate_lems_channel_analyser(
channel.file, channel.id, a.min_v,
a.step_target_voltage, a.max_v, a.clamp_delay,
a.clamp_duration, a.clamp_base_voltage, a.duration,
a.erev, gates, a.temperature, a.ca_conc, a.iv_curve, a.dat_suffix)
new_lems_file = os.path.join(OUTPUT_DIR,
"LEMS_Test_%s.xml" % channel.id)
lf = open(new_lems_file, 'w')
lf.write(lems_content)
lf.close()
if a.v:
print_comment_v("Written generated LEMS file to %s\n" % new_lems_file)
return new_lems_file
def run_lems_file(lems_file,verbose):
results = run_lems_with_jneuroml(lems_file,
nogui=True,
load_saved_data=True,
plot=False,
verbose=verbose)
return results
def plot_channel(channel,a,results,iv_data=None,grid=True):
plot_kinetics(channel,a,results,grid=grid)
plot_steady_state(channel,a,results,grid=grid)
if iv_data:
plot_iv_curves(channel,a,iv_data)
def plot_kinetics(channel,a,results,grid=True):
fig = plt.figure()
fig.canvas.set_window_title(("Time Course(s) of activation variables of "
"%s from %s at %s degC")
% (channel.id, channel.file, a.temperature))
plt.xlabel('Membrane potential (mV)')
plt.ylabel('Time Course - tau (ms)')
plt.grid('on' if grid else 'off')
t0 = 1e6
t1 = -1e6
gates = get_channel_gates(channel)
for g in gates:
g_tau = "rampCellPop0[0]/test/%s/%s/tau" \
% (channel.id, g) # Key for conductance variable.
col = get_state_color(g)
plt.plot([v*1000 for v in results[V]], [t*1000 for t in results[g_tau]], color=col,
linestyle='-', label="%s %s tau" % (channel.id, g))
plt.xlim([results[V][0]*1100,results[V][-1]*1100])
t0 = min(t0, min(results[g_tau])*1000)
t1 = max(t1, max(results[g_tau])*1000)
if t0==t1: t0 = 0
plt.ylim([t0-((t1-t0)/10),t1+((t1-t0)/10)])
leg = plt.legend(fancybox=True)
leg.get_frame().set_alpha(0.5)
if a.html or a.md:
save_fig('%s.tau.png' % channel.id)
def plot_steady_state(channel,a,results,grid=True):
fig = plt.figure()
fig.canvas.set_window_title(("Steady state(s) of activation variables of "
"%s from %s at %s degC")
% (channel.id, channel.file, a.temperature))
plt.xlabel('Membrane potential (mV)')
plt.ylabel('Steady state - inf')
plt.grid('on' if grid else 'off')
gates = get_channel_gates(channel)
for g in gates:
g_inf = "rampCellPop0[0]/test/%s/%s/inf" \
% (channel.id, g)
col = get_state_color(g)
plt.plot([v*1000 for v in results[V]], results[g_inf], color=col,
linestyle='-', label="%s %s inf" % (channel.id, g))
plt.xlim([results[V][0]*1100,results[V][-1]*1100])
plt.ylim([-0.05,1.05])
leg = plt.legend(fancybox=True, loc='center right')
leg.get_frame().set_alpha(0.5)
if a.html or a.md:
save_fig('%s.inf.png' % channel.id)
def save_fig(name):
overview_dir = make_overview_dir()
png_path = os.path.join(overview_dir,name)
plt.savefig(png_path,bbox_inches='tight')
def make_overview_dir():
overview_dir = os.path.join(OUTPUT_DIR,'channel_summary')
if not os.path.isdir(overview_dir):
os.makedirs(overview_dir)
return overview_dir
def compute_iv_curve(channel,end_time_ms,results,grid=True):
# Based on work by Rayner Lucas here:
# https://github.com/openworm/
# BlueBrainProjectShowcase/blob/master/
# Channelpedia/iv_analyse.py
dat_path = os.path.join(OUTPUT_DIR,
'%s.i_*.lems.dat' % channel.id)
file_names = glob.glob(dat_path)
i_peak = {}
i_steady = {}
hold_v = []
currents = {}
times = {}
for file_name in file_names:
name = os.path.split(file_name)[1] # Filename without path.
v_match = re.match("%s.i_(.*)\.lems\.dat" \
% channel.id, name)
voltage = v_match.group(1)
voltage = voltage.replace("min", "-")
voltage = float(voltage)/1000
hold_v.append(voltage)
times[voltage] = []
currents[voltage] = []
i_file = open(name)
        i_max = -sys.float_info.max
        i_min = sys.float_info.max
i_steady[voltage] = None
t_steady_end = end_time_ms/1000.0
for line in i_file:
t = float(line.split()[0])
times[voltage].append(t)
i = float(line.split()[1])
currents[voltage].append(i)
if i>i_max: i_max = i
if i<i_min: i_min = i
if t < t_steady_end:
i_steady[voltage] = i
i_peak_ = i_max if abs(i_max) > abs(i_min)\
else i_min
i_peak[voltage] = -1 * i_peak_
hold_v.sort()
iv_file = open('%s.i_peak.dat' % channel.id,'w')
for v in hold_v:
iv_file.write("%s\t%s\n" % (v,i_peak[v]))
iv_file.close()
iv_file = open('%s.i_steady.dat' % channel.id,'w')
for v in hold_v:
iv_file.write("%s\t%s\n" % (v,i_steady[v]))
iv_file.close()
items = ['hold_v','times','currents','i_steady','i_peak']
locals_ = locals().copy()
iv_data = {item:locals_[item] for item in items}
return iv_data
def plot_iv_curves(channel,a,iv_data,grid=True):
x = iv_data
plot_iv_curve_vm(channel,a,x['hold_v'],x['times'],x['currents'],grid=grid)
plot_iv_curve(a,x['hold_v'],x['i_peak'],
grid=grid,label="Peak currents")
plot_iv_curve(a,x['hold_v'],x['i_steady'],
grid=grid,label="Steady state currents")
def plot_iv_curve_vm(channel,a,hold_v,times,currents,grid=True):
# Holding potentials
fig = plt.figure()
fig.canvas.set_window_title(("Currents through voltage clamp for %s "
"from %s at %s degC, erev: %s V")
% (channel.id, channel.file,
a.temperature, a.erev))
plt.xlabel('Time (s)')
plt.ylabel('Current (A)')
plt.grid('on' if grid else 'off')
for v in hold_v:
col = get_colour_hex(
float(hold_v.index(v))/len(hold_v))
plt.plot(times[v], currents[v], color=col,
linestyle='-', label="%s V" % v)
plt.legend()
def make_iv_curve_fig(a,grid=True):
fig = plt.figure()
fig.canvas.set_window_title(
"Currents vs. holding potentials at erev = %s V"\
% a.erev)
plt.xlabel('Membrane potential (V)')
plt.ylabel('Current (A)')
plt.grid('on' if grid else 'off')
def plot_iv_curve(a,hold_v,i,grid=True,same_fig=False,**plot_args):
# A single IV curve.
if not same_fig:
make_iv_curve_fig(a,grid=grid)
if type(i) is dict:
i = [i[v] for v in hold_v]
if 'label' not in plot_args:
plot_args['label'] = 'Current'
plt.plot(hold_v, i, 'ko-', **plot_args)
plt.legend(loc=2)
def make_html_file(info):
merged = merge_with_template(info, HTML_TEMPLATE_FILE)
html_dir = make_overview_dir()
new_html_file = os.path.join(html_dir,'ChannelInfo.html')
lf = open(new_html_file, 'w')
lf.write(merged)
lf.close()
print_comment_v('Written HTML info to: %s' % new_html_file)
def make_md_file(info):
merged = merge_with_template(info, MD_TEMPLATE_FILE)
md_dir = make_overview_dir()
new_md_file = os.path.join(md_dir,'README.md')
lf = open(new_md_file, 'w')
lf.write(merged)
lf.close()
print_comment_v('Written Markdown info to: %s' % new_md_file)
def build_namespace(a=None,**kwargs):
if a is None:
a = argparse.Namespace()
# Add arguments passed in by keyword.
for key,value in kwargs.items():
setattr(a,key,value)
# Add defaults for arguments not provided.
for key,value in DEFAULTS.items():
if not hasattr(a,key):
setattr(a,key,value)
# Change all values to under_score from camelCase.
    for key,value in list(a.__dict__.items()):  # copy, since a.__dict__ is mutated below
new_key = convert_case(key)
if new_key != key:
setattr(a,new_key,value)
delattr(a,key)
return a
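# Illustrative programmatic use (the channel file name here is hypothetical;
# options use the camelCase names from DEFAULTS, e.g. ivCurve):
#   from pyneuroml.analysis import NML2ChannelAnalysis as ca
#   ca.run(channel_files=['NaConductance.channel.nml'], nogui=True, ivCurve=True)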
def main(args=None):
if args is None:
args = process_args()
run(a=args)
def run(a=None,**kwargs):
a = build_namespace(a,**kwargs)
#if (not a.nogui) or a.html:
# print('mpl')
info = {'info': ("Channel information at: "
"T = %s degC, "
"E_rev = %s mV, "
"[Ca2+] = %s mM") % (a.temperature, a.erev, a.ca_conc),
'channels': []}
na_chan_files = []
k_chan_files = []
ca_chan_files = []
other_chan_files = []
if len(a.channel_files) > 0:
for channel_file in a.channel_files:
channels = get_channels_from_channel_file(channel_file)
#TODO look past 1st channel...
if channels[0].species == 'na':
na_chan_files.append(channel_file)
elif channels[0].species == 'k':
k_chan_files.append(channel_file)
elif channels[0].species == 'ca':
ca_chan_files.append(channel_file)
else:
other_chan_files.append(channel_file)
channel_files = na_chan_files + k_chan_files + ca_chan_files + other_chan_files
print_comment_v("\nAnalysing channels from files: %s\n"%channel_files)
for channel_file in channel_files:
channels_info = process_channel_file(channel_file,a)
for channel_info in channels_info:
info['channels'].append(channel_info)
if not a.nogui and not a.html and not a.md:
plt.show()
else:
if a.html:
make_html_file(info)
if a.md:
make_md_file(info)
if __name__ == '__main__':
main()
| lgpl-3.0 |
bdestombe/flopy-1 | flopy/utils/observationfile.py | 3 | 12481 |
import numpy as np
from ..utils.utils_def import FlopyBinaryData
class ObsFiles(FlopyBinaryData):
def __init__(self):
super(ObsFiles, self).__init__()
return
def get_times(self):
"""
Get a list of unique times in the file
Returns
----------
out : list of floats
List contains unique simulation times (totim) in binary file.
"""
return self.data['totim'].reshape(self.get_ntimes()).tolist()
def get_ntimes(self):
"""
Get the number of times in the file
Returns
----------
out : int
The number of simulation times (totim) in binary file.
"""
return self.data['totim'].shape[0]
def get_nobs(self):
"""
Get the number of observations in the file
Returns
----------
out : tuple of int
A tupe with the number of records and number of flow items
in the file. The number of flow items is non-zero only if
swrtype='flow'.
"""
return self.nobs
def get_obsnames(self):
"""
Get a list of observation names in the file
Returns
----------
out : list of strings
List of observation names in the binary file. totim is not
included in the list of observation names.
"""
return list(self.data.dtype.names[1:])
def get_data(self, idx=None, obsname=None, totim=None):
"""
Get data from the observation file.
Parameters
----------
idx : int
The zero-based record number. The first record is record 0.
If idx is None and totim are None, data for all simulation times
are returned. (default is None)
obsname : string
The name of the observation to return. If obsname is None, all
observation data are returned. (default is None)
totim : float
The simulation time to return. If idx is None and totim are None,
data for all simulation times are returned. (default is None)
Returns
----------
        data : numpy record array
            Array has size (ntimes, nitems). totim is always returned. nitems
            is 2 if a single obsname is specified, otherwise nobs+1.
See Also
--------
Notes
-----
If both idx and obsname are None, will return all of the observation
data.
Examples
--------
>>> hyd = HydmodObs("my_model.hyd")
>>> ts = hyd.get_data()
"""
i0 = 0
i1 = self.data.shape[0]
if totim is not None:
idx = np.where(self.data['totim'] == totim)[0][0]
i0 = idx
i1 = idx + 1
elif idx is not None:
if idx < i1:
i0 = idx
i1 = i0 + 1
r = None
if obsname is None:
obsname = self.get_obsnames()
else:
if obsname is not None:
if obsname not in self.data.dtype.names:
obsname = None
else:
if not isinstance(obsname, list):
obsname = [obsname]
if obsname is not None:
obsname.insert(0, 'totim')
r = get_selection(self.data, obsname)[i0:i1]
return r
def get_dataframe(self, start_datetime='1-1-1970',
idx=None, obsname=None, totim=None, timeunit='D'):
"""
Get pandas dataframe with the incremental and cumulative water budget
items in the hydmod file.
Parameters
----------
start_datetime : str
If start_datetime is passed as None, the rows are indexed on totim.
Otherwise, a DatetimeIndex is set. (default is 1-1-1970).
idx : int
The zero-based record number. The first record is record 0.
If idx is None and totim are None, a dataframe with all simulation
times is returned. (default is None)
obsname : string
The name of the observation to return. If obsname is None, all
observation data are returned. (default is None)
totim : float
The simulation time to return. If idx is None and totim are None,
a dataframe with all simulation times is returned.
(default is None)
timeunit : string
time unit of the simulation time. Valid values are 'S'econds,
'M'inutes, 'H'ours, 'D'ays, 'Y'ears. (default is 'D').
Returns
-------
out : pandas dataframe
Pandas dataframe of selected data.
See Also
--------
Notes
-----
If both idx and obsname are None, will return all of the observation
data as a dataframe.
Examples
--------
>>> hyd = HydmodObs("my_model.hyd")
>>> df = hyd.get_dataframes()
"""
try:
import pandas as pd
from ..utils.utils_def import totim_to_datetime
except Exception as e:
            raise Exception(
                "ObsFiles.get_dataframe() error importing pandas: " +
                str(e))
i0 = 0
i1 = self.data.shape[0]
if totim is not None:
idx = np.where(self.data['totim'] == totim)[0][0]
i0 = idx
i1 = idx + 1
elif idx is not None:
if idx < i1:
i0 = idx
i1 = i0 + 1
if obsname is None:
obsname = self.get_obsnames()
else:
if obsname is not None:
if obsname not in self.data.dtype.names:
obsname = None
else:
if not isinstance(obsname, list):
obsname = [obsname]
if obsname is None:
return None
obsname.insert(0, 'totim')
dti = self.get_times()[i0:i1]
if start_datetime is not None:
dti = totim_to_datetime(dti,
start=pd.to_datetime(start_datetime),
timeunit=timeunit)
df = pd.DataFrame(self.data[i0:i1], index=dti, columns=obsname)
return df
def _read_data(self):
if self.data is not None:
return
while True:
try:
r = self.read_record(count=1)
if self.data is None:
self.data = r.copy()
elif r.size == 0:
break
else:
# should be hstack based on (https://mail.scipy.org/pipermail/numpy-discussion/2010-June/051107.html)
self.data = np.hstack((self.data, r))
except:
break
return
def _build_dtype(self):
"""
Build the recordarray and iposarray, which maps the header information
to the position in the formatted file.
"""
raise Exception(
'Abstract method _build_dtype called in BinaryFiles. This method needs to be overridden.')
def _build_index(self):
"""
Build the recordarray and iposarray, which maps the header information
to the position in the formatted file.
"""
raise Exception(
'Abstract method _build_index called in BinaryFiles. This method needs to be overridden.')
class HydmodObs(ObsFiles):
"""
HydmodObs Class - used to read binary MODFLOW HYDMOD package output
Parameters
----------
filename : str
Name of the hydmod output file
verbose : boolean
If true, print additional information to to the screen during the
extraction. (default is False)
hydlbl_len : int
Length of hydmod labels. (default is 20)
Returns
-------
None
"""
def __init__(self, filename, verbose=False, hydlbl_len=20):
"""
Class constructor.
"""
super(HydmodObs, self).__init__()
# initialize class information
self.verbose = verbose
# --open binary head file
self.file = open(filename, 'rb')
# NHYDTOT,ITMUNI
self.nobs = self.read_integer()
precision = 'single'
if self.nobs < 0:
self.nobs = abs(self.nobs)
precision = 'double'
self.set_float(precision)
# continue reading the file
self.itmuni = self.read_integer()
        self.v = np.empty(self.nobs, dtype=float)
self.v.fill(1.0E+32)
ctime = self.read_text(nchar=4)
self.hydlbl_len = int(hydlbl_len)
# read HYDLBL
hydlbl = []
for idx in range(0, self.nobs):
cid = self.read_text(self.hydlbl_len)
hydlbl.append(cid)
self.hydlbl = np.array(hydlbl)
# build dtype
self._build_dtype()
# build index
self._build_index()
self.data = None
self._read_data()
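    # Header layout consumed by __init__ above: NHYDTOT (int; a negative value
    # signals double precision), ITMUNI (int), a 4-character time-unit label,
    # then NHYDTOT hydrograph labels of hydlbl_len characters each, followed by
    # the (totim, values...) records that _read_data() collects.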
def _build_dtype(self):
# create dtype
dtype = [('totim', self.floattype)]
for site in self.hydlbl:
if not isinstance(site, str):
site_name = site.decode().strip()
else:
site_name = site.strip()
dtype.append((site_name, self.floattype))
self.dtype = np.dtype(dtype)
return
def _build_index(self):
return
class SwrObs(ObsFiles):
"""
Read binary SWR observations output from MODFLOW SWR Process
observation files
Parameters
----------
filename : string
Name of the cell budget file
precision : string
'single' or 'double'. Default is 'double'.
verbose : bool
Write information to the screen. Default is False.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> so = flopy.utils.SwrObs('mymodel.swr.obs')
"""
def __init__(self, filename, precision='double', verbose=False):
"""
Class constructor.
"""
super(SwrObs, self).__init__()
self.set_float(precision=precision)
# initialize class information
self.verbose = verbose
# open binary head file
self.file = open(filename, 'rb')
# NOBS
self.nobs = self.read_integer()
# read obsnames
obsnames = []
for idx in range(0, self.nobs):
cid = self.read_text()
if isinstance(cid, bytes):
cid = cid.decode()
obsnames.append(cid.strip())
self.obs = obsnames
# read header information
self._build_dtype()
# build index
self._build_index()
# read data
self.data = None
self._read_data()
def _build_dtype(self):
vdata = [('totim', self.floattype)]
for name in self.obs:
vdata.append((str(name), self.floattype))
self.dtype = np.dtype(vdata)
return
def _build_index(self):
return
def get_selection(data, names):
"""
Parameters
----------
data : numpy recarray
recarray of data to make a selection from
names : string or list of strings
column names to return
Returns
-------
out : numpy recarry
recarray with selection
"""
if not isinstance(names, list):
names = [names]
ierr = 0
for name in names:
if name not in data.dtype.names:
ierr += 1
print('Error: {} is not a valid column name'.format(name))
if ierr > 0:
raise Exception('Error: {} names did not match'.format(ierr))
# Valid list of names so make a selection
dtype2 = np.dtype({name: data.dtype.fields[name] for name in names})
return np.ndarray(data.shape, dtype2, data, 0, data.strides)
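# Illustrative use of get_selection (editor's sketch, not part of flopy):
#   rec = np.rec.fromrecords([(0.0, 1.5), (1.0, 1.7)], names=['totim', 'obs1'])
#   sub = get_selection(rec, ['totim'])   # record view with only the 'totim' field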
| bsd-3-clause |
luwei0917/awsemmd_script | plotcontour.py | 1 | 2880 | #!/usr/bin/env python3
'''
Generates contour plot from 3 columns of data.
'''
import sys
import argparse
import os
import numpy as np
from numpy.random import uniform
#from matplotlib import rc
#rc('text', usetex=True)
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
from scipy.interpolate import interp2d
parser = argparse.ArgumentParser(description='Plots pmf data.')
parser.add_argument("filename", nargs='?', help="input filename", default="pmf-350.dat")
parser.add_argument("outname", nargs='?', help="output filename", default="test.png")
parser.add_argument("-dpi", default=150, type=int, help="figure dpi")
parser.add_argument("-x", default=1, type=int, help="x column number in f")
parser.add_argument("-xmin", default=0, type=float, help="x axis lower limit")
parser.add_argument("-xmax", default=1, type=float, help="x axis upper limit")
parser.add_argument("-y", default=2, type=int, help="y column number in f")
parser.add_argument("-ymin", default=0, type=float, help="y axis lower limit")
parser.add_argument("-ymax", default=1, type=float, help="y axis upper limit")
parser.add_argument("-z", default=3, type=int, help="z column number in f")
parser.add_argument("-zmin", default=0, type=float, help="z axis lower limit")
parser.add_argument("-zmax", default=30, type=float, help="z axis upper limit")
parser.add_argument("-title", default='', help="title")
parser.add_argument("-xlabel", default='', help="xlabel")
parser.add_argument("-ylabel", default='', help="ylabel")
parser.add_argument("-axisfontsize", default=18, type=float, help="font size of xlabel, ylabel")
parser.add_argument("-titlefontsize", default=28, type=float, help="font size of title")
args = parser.parse_args()
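# Example invocation (illustrative file names):
#   ./plotcontour.py pmf-350.dat pmf.png -x 1 -y 2 -z 3 -zmin 0 -zmax 30 \
#       -xlabel "Q" -ylabel "Rg" -title "PMF at 350K"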
mpl.rcParams.update({'font.size': args.axisfontsize})
data = np.loadtxt(args.filename)
data = data[~np.isnan(data).any(axis=1)] # remove rows with nan
data = data[~(data[:,args.z] > args.zmax)] # remove rows of data for z not in [zmin zmax]
data = data[~(data[:,args.z] < args.zmin)]
xi = np.linspace(min(data[:,args.x]), max(data[:,args.x]), 20)
yi = np.linspace(min(data[:,args.y]), max(data[:,args.y]), 20)
zi = griddata((data[:,args.x], data[:,args.y]), data[:,args.z], (xi[None,:], yi[:,None]), method='linear')
# plt.contour(xi, yi, zi, 50, linewidths=0.25,colors='k')
jet = cm = plt.get_cmap('jet')
print(jet)
# plt.contourf(xi, yi, zi, 20, cmap='rainbow')
plt.contourf(xi, yi, zi, 30, cmap='jet')
plt.xlim(args.xmin, args.xmax)
plt.ylim(args.ymin, args.ymax)
plt.clim(args.zmin, args.zmax)
plt.colorbar()
plt.xlabel(args.xlabel)
plt.ylabel(args.ylabel)
plt.title(args.title, y=1.02, fontsize = args.titlefontsize)
#plt.tight_layout()
#plt.axis('equal')
#plt.axes().set_aspect('equal')
#plt.axes().set_aspect('scaled')
plt.savefig(args.outname, dpi=args.dpi, bbox_inches='tight')
os.system("open " + args.outname)
| mit |
aminert/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/cluster/plot_segmentation_toy.py | 1 | 4262 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image as image_
from sklearn.cluster import spectral_clustering
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
# #############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image_.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent from the gradient the segmentation is close to a voronoi
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
# #############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image_.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
# plt.show()
pltshow(plt)
| mit |
mfjb/scikit-learn | sklearn/neighbors/unsupervised.py | 22 | 4751 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p: integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
        Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
>>> np.asarray(rng[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
ehua7365/RibbonOperators | TEBD/mpstest6.py | 1 | 10594 | """
mpstest6.py
A test of manipulating matrix product states with numpy.
2014-08-25
"""
import numpy as np
import matplotlib.pyplot as plt
from cmath import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def main():
#test1()
#test2()
test3()
#test4()
#test5()
def test1():
print("*** MPS tests started ***")
(N,chi,d) = (7,10,2)
A = randomMPS(N,chi,d)
state = getState(A)
state = state/np.sqrt(np.dot(np.conj(state),state))
prod = np.dot(np.conj(state),state)
approxA = getMPS(state,2)
approxState = getState(approxA)
approxProd = np.dot(np.conj(approxState),approxState)
relErr = approxProd/prod - 1
S = entropy(state)
print("State total %d elements"%state.size)
print("MPS total %d elements"%A.size)
print("(N,chi,d) = (%d,%d,%d)"%(N,chi,d))
print("Expected: (%f,%f)"%polar(prod))
print("SVD: (%f,%f)"%polar(innerProduct(approxA,approxA)))
print("Product: (%f,%f)"%polar(approxProd))
print("Relative error: %f"%np.absolute(relErr))
print("Entropy: %f"%S)
print("")
# state = np.ones(d**N)/np.sqrt(2)**N
# state = np.zeros(2**10)
# state[0] = 1/np.sqrt(2)
# state[-1] = 1/np.sqrt(2)
state = np.random.rand(d**N)
state = state/np.linalg.norm(state)
mps = getMPS(state,4)
print("Expected: (%f,%f)"%polar(np.inner(state,state)))
print("MPS: (%f,%f)"%polar(innerProduct(mps,mps)))
print("*** MPS tests finished ***\n")
def test2():
print("*** Started testing MPS approximation ***")
(N,chi,d) = (5,3,2)
A = randomMPS(N,chi,d)
a = getState(A)
for newChi in xrange(1,12):
newA = getMPS(a,newChi)
print(fidelityMPS(A,newA))
newa = getState(newA)
print(fidelity(a,newa))
print(fidelity(a,a))
print(fidelityMPS(A,A))
print("*** Finished testing MPS approximation ***")
def test3():
print("*** Started testing MPS ***")
N = 5
d = 2
X = []
Y = []
Z = []
for chi0 in xrange(1,8):
for chi1 in xrange(1,8):
F = 0
for i in xrange(20):
mps = randomMPS(N,chi0,d)
state = getState(mps)
newmps = getMPS(state,chi1)
state1 = getState(newmps)
F += fidelityMPS(mps,newmps)
X.append(chi0)
Y.append(chi1)
Z.append(F/20)
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_trisurf(X, Y, Z, cmap=cm.jet, linewidth=0.2)
ax.set_xlabel('chi0')
ax.set_ylabel('chi1')
ax.set_zlabel('fidelity')
plt.show()
print("*** Finished testing MPS ***")
def test4():
print("*** Started testing fidelity ***")
d = 2
N = 5
for i in xrange(10):
mpsa = randomMPS(N,5,d)
a = getState(mpsa)
mpsb = getMPS(a,2)
b = getState(mpsb)
print(fidelity(a,b))
print(fidelityMPS(mpsa,mpsb))
print("*** Finished testing fidelity ***")
def test5():
print("*** Started testing MPS ***")
N = 5
d = 2
X = []
Y = []
Z = []
for chi0 in xrange(1,8):
for chi1 in xrange(1,8):
F = 0
for i in xrange(5):
mps = randomMPS(N,chi0,d)
state0 = getState(mps)
newmps = getMPS(state0,chi1)
state1 = getState(newmps)
F += fidelity(state0,state1)
X.append(chi0)
Y.append(chi1)
Z.append(F/20)
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_trisurf(X, Y, Z, cmap=cm.jet, linewidth=0.2)
ax.set_xlabel('chi0')
ax.set_ylabel('chi1')
ax.set_zlabel('fidelity')
plt.show()
print("*** Finished testing MPS ***")
def closeness(a,b):
return np.inner(np.conj(a),a)-np.inner(np.conj(b),b)
def correlation(A,B):
return innerProduct(A,B)*innerProduct(B,A)/innerProduct(A,A)/innerProduct(B,B)
def fidelityMPS(A,B):
""" Fidelity of two MPS """
return innerProduct(A,B)*innerProduct(B,A)\
/innerProduct(A,A)/innerProduct(B,B)
def fidelity(a,b):
""" Fidelity of two states """
return np.inner(np.conj(a),b)*np.inner(np.conj(b),a)\
/np.inner(np.conj(a),a)/np.inner(np.conj(b),b)
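# Both fidelity measures above compute F = |<a|b>|^2 / (<a|a><b|b>), which
# equals 1 when the two (possibly unnormalised) states agree up to a phase.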
def randomMPS(N,chi,d):
""" Returns a random MPS given parameters N, chi, d."""
A = []
for i in xrange(N):
A.append((np.random.rand(chi,d,chi)-.5)+1j*(np.random.rand(chi,d,chi)-.5))
#A.append(np.random.rand(chi,d,chi))
return np.array(A)
def bellState():
return np.array([1,0,0,1],dtype=complex)/np.sqrt(2)
def getState(A):
""" State vector of a MPS."""
N = len(A)
chi = A[0].shape[0]
d = A[0].shape[1]
c = A[0]
for i in xrange(1,N):
c = np.tensordot(c,A[i],axes=(-1,0))
c = np.trace(c,axis1=0,axis2=-1)
return np.reshape(c,d**N)
def getMPS(state,chi):
""" MPS of a state."""
d = 2 # Qubits have 2 states each
N = int(np.log2(len(state))) # Number of qubits
c = np.reshape(state,cShape(d,N)) # State amplitudes tensor.
A = [] # List of N matrices of MPS, each of shape (chi,d,chi)
# Start left end with a vector of size (d,chi)
c = np.reshape(c,(d,d**(N-1))) # Reshape c
(ap,sv,c) = np.linalg.svd(c) # Apply SVD
s = np.zeros((d,chi),dtype=complex) # Construct shape of singular value matrix s
s[:d,:d] = np.diag(sv[:chi]) # Fill s with singular values
# Trim c or fill rest of c with zeros
newc = np.zeros((chi,d**(N-1)),dtype=complex)
newc[:min(chi,d**(N-1)),:] = c[:chi,:]
c = newc
A.append(np.tensordot(ap,s,axes=(-1,0))) # Contract and append to A
# Sweep through the middle, creating matrix products each with
# shape (chi,d,chi)
for i in xrange(1,N-2):
c = np.reshape(c,(d*chi,d**(N-i-1)))
(ap,sv,c) = np.linalg.svd(c)
s = np.zeros((d*chi,chi),dtype=complex)
s[:min(chi,len(sv)),:min(chi,len(sv))] = np.diag(sv[:chi])
A.append(np.reshape(np.dot(ap,s),(chi,d,chi)))
newc = np.zeros((chi,d**(N-i-1)),dtype=complex)
newc[:min(chi,len(sv)),:] = c[:chi,:]
c = newc
# Finish right end with the remaining vector
c = np.reshape(c,(d*chi,d))
(ap,sv,c) = np.linalg.svd(c)
s = np.zeros((chi,d),dtype=complex)
s[:d,:d] = np.diag(sv[:chi])
A.append(np.reshape(ap[:chi,:],(chi,d,chi)))
c = np.dot(s,c)
A.append(c)
# Fix up ends by filling first row of correctly shaped zeros with
# end vectors such that the trace is preserved.
start = np.zeros((chi,d,chi),dtype=complex)
start[0,:,:] = A[0]
A[0] = start
finish = np.zeros((chi,d,chi),dtype=complex)
finish[:,:,0] = A[-1]
A[-1] = finish
# Return MPS as numpy array with shape (N,chi,d,chi)
return np.array(A)
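# Added illustrative sketch (hypothetical helper, never called automatically):
# compress a random state into an MPS with bond dimension chi and report the
# fidelity of the reconstruction -- the same quantity the tests above sweep
# over chi. Larger chi should give fidelities closer to 1.
def _roundtrip_fidelity(N=4, chi=4, d=2):
    """ Fidelity between a random state and its chi-truncated MPS. """
    state = randomState(d, N)
    approx = getState(getMPS(state, chi))
    return fidelity(state, approx)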
def innerProduct(A,B):
""" Inner product <A|B> using transfer matrices."""
N = len(A)
chiA = A.shape[1]
chiB = B.shape[1]
d = A.shape[2]
# Take adjoint of |A> to get <A|
A = np.conj(A)
# Construct list of transfer matrices by contracting pairs of
# tensors from A and B.
transfer = []
for i in xrange(N):
t = np.tensordot(A[i],B[i],axes=(1,1))
t = np.transpose(t,axes=(0,2,1,3))
t = np.reshape(t,(chiA*chiB,chiA*chiB))
transfer.append(t)
# Contract the transfer matrices.
prod = transfer[0]
for i in xrange(1,len(transfer)):
prod = np.tensordot(prod,transfer[i],axes=(-1,0))
return np.trace(prod)
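# Added illustrative check (hypothetical helper): the transfer-matrix
# contraction above should agree with the dense inner product obtained by
# first contracting both MPS into full state vectors. Only practical for
# small N, where the d**N vector still fits in memory.
def _check_innerProduct(N=4, chi=3, d=2):
    A = randomMPS(N, chi, d)
    B = randomMPS(N, chi, d)
    dense = np.inner(np.conj(getState(A)), getState(B))
    return np.allclose(innerProduct(A, B), dense)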
def operatorInner(A,U,B):
""" Compute <A|U|B> where A,B are MPS and U is a MPO."""
N = len(A)
d = A.shape[2]
chiA = A.shape[1]
chiB = B.shape[1]
chiU = U.shape[1]
# Take complex conjugate of elements in A to get <A|
A = np.conj(A)
# Construct list of transfer matrices
transfer = []
for i in xrange(N):
t = np.tensordot(A[i],U[i],axes=(1,1))
t = np.tensordot(t,B[i],axes=(3,1))
t = np.reshape(t,(chiA*chiA*d,chiB*chiB*d))
transfer.append(t)
# Take product of transfer matrices
prod = transfer[0]
for i in xrange(1,N):
prod = np.tensordot(prod,transfer[i],axes=(-1,0))
return np.trace(prod)
def getOperator(mpo):
""" Contract MPO into matrix representation."""
    N = len(mpo)
d = mpo.shape[2]
chi = mpo.shape[1]
prod = mpo[0]
for i in xrange(1,N):
prod = np.tensordot(prod,mpo[i],axes=(-1,0))
prod = np.trace(prod,axis1=0,axis2=-1)
permutation = tuple(range(0,2*N,2) + range(1,2*N,2))
    prod = np.transpose(prod, permutation)
return np.reshape(prod,(d**N,d**N))
def getMPO(U,chi):
""" Returns MPO of operator U."""
d = 2
N = int(np.log2(U.shape[0]))
mpo = []
    c = np.reshape(U, tuple([d for i in xrange(2*N)]))
permutation = []
for i in xrange(N):
permutation.append(i)
permutation.append(i+N)
    c = np.transpose(c, tuple(permutation))
c = np.reshape(c,(d**2,d**(2*(N-1))))
[up,sv,c] = np.linalg.svd(c)
    return 0  # placeholder: the MPO assembly above is not finished
def randomState(d,N):
state = (np.random.rand(d**N)-.5) + (np.random.rand(d**N)-.5)*1j
state = state/np.linalg.norm(state)
return state
def equalDist(N):
""" Returns state with equal amplitudes."""
return np.ones(cShape(2,N))/np.sqrt(2)**N
def ghz(N):
c = np.zeros(2**N)
c[0] = 1/np.sqrt(2)
c[-1] = 1/np.sqrt(2)
return np.reshape(c,cShape(2,N))
def Z(N):
sz = np.array([[1,0],[0,-1]])
    z = np.identity(1)  # 1x1 seed so the result acts on exactly N qubits
for i in xrange(N):
z = np.kron(z,sz)
return z
def tp(factors):
""" Returns tensor product of list of matrices."""
prod = factors[0]
for i in xrange(1,len(factors)):
        prod = np.kron(prod, factors[i])
return prod
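# Added illustrative check (hypothetical helper): for two factors, tp should
# reduce to a single Kronecker product.
def _check_tp():
    sx = np.array([[0, 1], [1, 0]])
    sz = np.array([[1, 0], [0, -1]])
    return np.allclose(tp([sx, sz]), np.kron(sx, sz))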
def cShape(d,N):
""" Returns the shape of c tensor representation."""
return tuple([d for i in xrange(N)])
def densityMatrix(state):
    """ Density matrix of a pure state. """
    rho = np.outer(state, np.conj(state))
## print np.linalg.det(rho)
return rho
def entropy(state):
""" Von Neumann Entropy of pure state by SVD. """
c = np.reshape(state,(2,np.size(state)/2))
d = np.linalg.svd(c)[1]
p = np.abs(d)**2
S = 0
for x in p:
if x != 0:
S += x*np.log(x)
return -S
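# Added worked example (hypothetical helper): for the Bell state the reduced
# state of either qubit is maximally mixed, so the entanglement entropy
# across the 1|1 cut should come out close to log(2) ~ 0.693 (natural log,
# matching the np.log used above).
def _bell_entropy_example():
    return entropy(bellState())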
def matFunction(f,A):
""" Function of a matrix. """
(D,P) = np.linalg.eig(A)
return np.dot(P,np.dot(np.diag(f(D)),np.linalg.inv(P)))
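# Added cross-check (hypothetical helper; assumes scipy is available and
# imports it locally so the module's dependencies stay unchanged): applying
# matFunction with np.exp should agree with scipy.linalg.expm for a
# diagonalizable matrix.
def _check_matFunction():
    from scipy.linalg import expm
    sx = np.array([[0.0, 1.0], [1.0, 0.0]])  # Pauli-X, diagonalizable
    return np.allclose(matFunction(np.exp, sx), expm(sx))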
if __name__ == "__main__":
main()
| mit |
michellemorales/OpenMM | models/ptn/utils.py | 2 | 3689 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import StringIO
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab as p
# axes3d is being used implicitly for visualization.
from mpl_toolkits.mplot3d import axes3d as p3 # pylint:disable=unused-import
import numpy as np
from PIL import Image
from skimage import measure
import tensorflow as tf
def save_image(inp_array, image_file):
"""Function that dumps the image to disk."""
inp_array = np.clip(inp_array, 0, 255).astype(np.uint8)
image = Image.fromarray(inp_array)
buf = StringIO.StringIO()
image.save(buf, format='JPEG')
  with open(image_file, 'wb') as f:
f.write(buf.getvalue())
return None
def image_flipud(images):
"""Function that flip (up-down) the np image."""
quantity = images.get_shape().as_list()[0]
image_list = []
for k in xrange(quantity):
image_list.append(tf.image.flip_up_down(images[k, :, :, :]))
outputs = tf.stack(image_list)
return outputs
def resize_image(inp_array, new_height, new_width):
"""Function that resize the np image."""
inp_array = np.clip(inp_array, 0, 255).astype(np.uint8)
image = Image.fromarray(inp_array)
# Reverse order
image = image.resize((new_width, new_height))
return np.array(image)
def display_voxel(points, vis_size=128):
"""Function to display 3D voxel."""
try:
data = visualize_voxel_spectral(points, vis_size)
except ValueError:
data = visualize_voxel_scatter(points, vis_size)
return data
def visualize_voxel_spectral(points, vis_size=128):
"""Function to visualize voxel (spectral)."""
points = np.rint(points)
points = np.swapaxes(points, 0, 2)
fig = p.figure(figsize=(1, 1), dpi=vis_size)
verts, faces = measure.marching_cubes(points, 0, spacing=(0.1, 0.1, 0.1))
ax = fig.add_subplot(111, projection='3d')
ax.plot_trisurf(
verts[:, 0], verts[:, 1], faces, verts[:, 2], cmap='Spectral_r', lw=0.1)
ax.set_axis_off()
fig.tight_layout(pad=0)
fig.canvas.draw()
data = np.fromstring(
fig.canvas.tostring_rgb(), dtype=np.uint8, sep='').reshape(
vis_size, vis_size, 3)
p.close('all')
return data
def visualize_voxel_scatter(points, vis_size=128):
"""Function to visualize voxel (scatter)."""
points = np.rint(points)
points = np.swapaxes(points, 0, 2)
fig = p.figure(figsize=(1, 1), dpi=vis_size)
ax = fig.add_subplot(111, projection='3d')
x = []
y = []
z = []
(x_dimension, y_dimension, z_dimension) = points.shape
for i in range(x_dimension):
for j in range(y_dimension):
for k in range(z_dimension):
if points[i, j, k]:
x.append(i)
y.append(j)
z.append(k)
ax.scatter3D(x, y, z)
ax.set_axis_off()
fig.tight_layout(pad=0)
fig.canvas.draw()
data = np.fromstring(
fig.canvas.tostring_rgb(), dtype=np.uint8, sep='').reshape(
vis_size, vis_size, 3)
p.close('all')
return data
| gpl-2.0 |
aabadie/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
jblackburne/scikit-learn | sklearn/metrics/cluster/supervised.py | 11 | 33436 | """Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays."""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, max_n_classes=5000):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
max_n_classes : int, optional (default=5000)
        Maximal number of classes handled for contingency_matrix.
        This helps to avoid a MemoryError with regression targets
        for mutual_information.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
if n_classes > max_n_classes:
raise ValueError("Too many classes for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
if n_clusters > max_n_classes:
raise ValueError("Too many clusters for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred, max_n_classes=5000):
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://link.springer.com/article/10.1007%2FBF01908075
.. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0 or
classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes=5000):
"""Compute the homogeneity and completeness and V-Measure scores at once.
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
        Maximal number of classes handled by the homogeneity_completeness_v_measure
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred,
max_n_classes=max_n_classes)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness /
(homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred, max_n_classes=5000):
"""Homogeneity metric of a cluster labeling given a ground truth.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
        Maximal number of classes handled by the homogeneity_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[0]
def completeness_score(labels_true, labels_pred, max_n_classes=5000):
"""Completeness metric of a cluster labeling given a ground truth.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
        Maximal number of classes handled by the completeness_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[1]
def v_measure_score(labels_true, labels_pred, max_n_classes=5000):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
        Maximal number of classes handled by the v_measure_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous but un-necessary splits harms completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None,
max_n_classes=5000):
"""Mutual Information between two clusterings.
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the mutual_info_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
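# Added illustrative sketch (not part of scikit-learn): the value returned by
# mutual_info_score can be reproduced directly from the docstring formula
# MI = sum_ij P(i,j) * log(P(i,j) / (P(i) * P(j))) using the contingency
# counts. The helper below is hypothetical and is never called by the
# library code.
def _mi_from_formula(labels_true, labels_pred):
    c = contingency_matrix(labels_true, labels_pred).astype(float)
    p_ij = c / c.sum()
    p_i = p_ij.sum(axis=1, keepdims=True)
    p_j = p_ij.sum(axis=0, keepdims=True)
    nz = p_ij > 0
    return np.sum(p_ij[nz] * np.log(p_ij[nz] / (p_i * p_j)[nz]))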
def adjusted_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
max_n_classes: int, optional (default=5000)
        Maximal number of classes handled by the adjusted_mutual_info_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
    ami: float (upper-limited by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(ie perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
"""Normalized Mutual Information between two clusterings.
Normalized Mutual Information (NMI) is an normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
max_n_classes: int, optional (default=5000)
        Maximal number of classes handled by the normalized_mutual_info_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def fowlkes_mallows_score(labels_true, labels_pred, max_n_classes=5000):
"""Measure the similarity of two clusterings of a set of points.
    The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
the precision and recall::
FMI = TP / sqrt((TP + FP) * (TP + FN))
    Where ``TP`` is the number of **True Positive** (i.e. the number of pairs
    of points that belong to the same clusters in both ``labels_true`` and
    ``labels_pred``), ``FP`` is the number of **False Positive** (i.e. the
    number of pairs of points that belong to the same clusters in
    ``labels_true`` but not in ``labels_pred``) and ``FN`` is the number of
    **False Negative** (i.e. the number of pairs of points that belong to
    the same clusters in ``labels_pred`` but not in ``labels_true``).
The score ranges from 0 to 1. A high value indicates a good similarity
between two clusters.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : int array, shape = (``n_samples``,)
A clustering of the data into disjoint subsets.
labels_pred : array, shape = (``n_samples``, )
A clustering of the data into disjoint subsets.
max_n_classes : int, optional (default=5000)
Maximal number of classes handled by the Fowlkes-Mallows
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
    .. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<http://wildfire.stat.ucla.edu/pdflibrary/fowlkes.pdf>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred,)
n_samples, = labels_true.shape
c = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
tk = np.dot(c.ravel(), c.ravel()) - n_samples
pk = np.sum(np.sum(c, axis=0) ** 2) - n_samples
qk = np.sum(np.sum(c, axis=1) ** 2) - n_samples
return tk / np.sqrt(pk * qk) if tk != 0. else 0.
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
rvraghav93/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 63 | 2945 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from itertools import cycle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positive elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
colors = cycle(['b', 'r', 'g', 'c', 'k'])
neg_log_alphas_lasso = -np.log10(alphas_lasso)
neg_log_alphas_enet = -np.log10(alphas_enet)
for coef_l, coef_e, c in zip(coefs_lasso, coefs_enet, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
neg_log_alphas_positive_lasso = -np.log10(alphas_positive_lasso)
for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
l2 = plt.plot(neg_log_alphas_positive_lasso, coef_pl, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet)
for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors):
l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c)
l2 = plt.plot(neg_log_alphas_positive_enet, coef_pe, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Djabbz/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
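# Illustration only (not part of the original example): the tokenizer maps any
# token that starts with a digit or underscore to the "#NUMBER" placeholder, e.g.
#   number_aware_tokenizer("the 1970s Apollo 11 mission")
#   -> ['the', '#NUMBER', 'Apollo', '#NUMBER', 'mission']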
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
# much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
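# Illustrative note (added for clarity, not in the original script): the score is
# the total weight crossing the bicluster boundary divided by the weight inside it,
# so lower values mean better-isolated biclusters. For a toy matrix
#   X = [[1, 0], [0, 1]]
# the bicluster (row 0, col 0) has inside weight 1 and cut 0, giving a score of 0.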
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
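# e.g. (illustration only) most_common(defaultdict(int, {'sci.med': 5, 'rec.autos': 2}))
# returns [('sci.med', 5), ('rec.autos', 2)].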
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
schets/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
                        module=r'^{0}\.'.format(re.escape(__name__)))  # raw string avoids an invalid-escape warning
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
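# Illustrative note (not part of the original file): the seeding above can be made
# reproducible by exporting the environment variable read in setup_module, e.g.
#   SKLEARN_SEED=42
# before launching the test runner that invokes this fixture.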
| bsd-3-clause |
Barmaley-exe/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 45 | 5463 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired; therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, which uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, which have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
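# Illustration only (not in the original example): _weights linearly splits each
# coordinate between its two neighbouring integer bins, e.g.
#   _weights(np.array([0.25]))
#   -> (array([0., 1.]), array([0.75, 0.25]))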
def _generate_center_coordinates(l_x):
l_x = float(l_x)
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
    p : sparse matrix of shape (n_dir * l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
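# Illustrative sketch (an assumption for small sizes, not in the original script):
#   op = build_projection_operator(8, 4)
# builds a sparse operator of roughly shape (4 * 8, 8 ** 2), i.e. one row per
# detector bin per angle and one column per image pixel.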
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
    n_pts = 36  # use an int: newer numpy rejects a float sample count in rs.rand
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1  # np.int is deprecated in recent numpy
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l // 7)  # integer number of projection angles
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
ky822/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
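# For reference (added note, hedged): a svmlight/libsvm line is roughly
#   label index:value index:value ...
# e.g. a hypothetical zero-based line matching the first row tested above would be
#   1 2:2.5 10:-5.2 15:1.5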
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
MTgeophysics/mtpy | mtpy/utils/shapefiles_creator.py | 1 | 35249 | #! /usr/bin/env python
"""
Description:
Create shape files for Phase Tensor Ellipses, Tipper Real/Imag.
export the phase tensor map and tippers into jpeg/png images
CreationDate: 2017-03-06
Developer: [email protected]
Revision History:
LastUpdate: 10/11/2017 FZ fix bugs after the big merge
LastUpdate: 20/11/2017 change from freq to period filenames, allow to specify a period
LastUpdate: 30/10/2018 combine ellipses and tippers together, refactorings
[email protected] 27-03-2020 17:33:23 AEDT:
Fix outfile/directory issue (see commit messages)
"""
import glob
import logging
import os
import sys
import click
import geopandas as gpd
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.axes_grid1 import make_axes_locatable
from shapely.geometry import Point, Polygon, LineString, LinearRing
from mtpy.core.edi_collection import EdiCollection
from mtpy.utils.mtpy_decorator import deprecated
from mtpy.utils.mtpylog import MtPyLog
from mtpy.utils.edi_folders import recursive_glob
mpl.rcParams['lines.linewidth'] = 2
# mpl.rcParams['lines.color'] = 'r'
mpl.rcParams['figure.figsize'] = [10, 6]
_logger = MtPyLog.get_mtpy_logger(__name__) # logger inside this file/module
_logger.setLevel(logging.DEBUG) # set your logger level
class ShapefilesCreator(EdiCollection):
""" Extend the EdiCollection parent class,
create phase tensor and tipper shapefiles for a list of edifiles
:param edifile_list: [path2edi,...]
:param outdir: path2output dir, where the shp file will be written.
:param orig_crs = {'init': 'epsg:4283'} # GDA94
"""
def __init__(self, edifile_list, outdir, epsg_code=4326):
"""
loop through a list of edi files, create required shapefiles
:param edifile_list: [path2edi,...]
:param outdir: path2output dir, where the shp file will be written.
:param epsg_code: epsg code of the EDI data CRS.
"""
self.orig_crs = {'init': 'epsg:{}'.format(epsg_code)}
        # ensure that outdir is specified, and create it if it does not exist.
if outdir is None:
raise Exception("Error: OutputDir is not specified!!!")
elif not os.path.exists(outdir):
os.mkdir(outdir)
self.outdir = outdir
# call the super constructor
super(ShapefilesCreator, self).__init__(edilist=edifile_list, outdir=outdir)
# python-3 syntax: super().__init__(edilist=edifile_list, outdir=outdir)
self.stations_distances = self.get_stations_distances_stats()
# These attributes below are defined in the parent class.
# self.all_periods = self._get_all_periods()
# self.ptol = 0.05 # this param controls how near-equal freqs/periods are grouped together:
# 10% may result more double countings of freq/periods than 5%.
# eg: E:\Data\MT_Datasets\WenPingJiang_EDI 18528 rows vs 14654 rows
def _export_shapefiles(self, gpdf, element_type, epsg_code, period, export_fig):
"""
Convenience function for saving shapefiles.
Parameters
----------
gpdf : geopandas.GeoDataFrame
Dataframe containg shapefile data.
element_type : str
Name of the element type, e.g. 'Phase_Tensor'.
epsg_code : int
EPSG code for CRS of the shapefile.
period : float
The period of the data.
export_fig : bool
Whether or not to export the shapefile as an image.
Returns
-------
str
Path to the shapefile.
"""
filename = '{}_EPSG_{}_Period_{}.shp'.format(element_type, epsg_code, period)
directory = os.path.join(self.outdir, 'Period_{}'.format(period))
if not os.path.exists(directory):
os.mkdir(directory)
outpath = os.path.join(directory, filename)
gpdf.to_file(outpath, driver='ESRI Shapefile')
self._logger.info("Saved shapefile to %s", outpath)
if export_fig is True:
# this bbox ensures that the whole MT-stations area is covered independent of periods
bbox_dict = self.get_bounding_box(epsgcode=epsg_code)
path2jpg = outpath.replace(".shp", ".jpg")
export_geopdf_to_image(gpdf, bbox_dict, path2jpg, colorby='phi_max',
colormap='nipy_spectral_r')
self._logger.info("Saved image to %s", outpath)
return outpath
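    # Illustrative note (not in the original source): with the naming scheme above,
    # a call with element_type='Phase_Tensor', epsg_code=4283 and period=10.0 writes
    #   OUTDIR/Period_10.0/Phase_Tensor_EPSG_4283_Period_10.0.shp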
def create_phase_tensor_shp(self, period, ellipsize=None,
target_epsg_code=4283, export_fig=False):
"""
create phase tensor ellipses shape file correspond to a MT period
:return: (geopdf_obj, path_to_shapefile)
"""
if ellipsize is None: # automatically decide suitable ellipse size.
ellipsize = self.stations_distances.get("Q1PERCENT") / 2 # a half or a third of the min_distance?
self._logger.debug("Automatically Selected Max-Ellispse Size = %s", ellipsize)
pt = self.get_phase_tensor_tippers(period)
self._logger.debug("phase tensor values =: %s", pt)
if len(pt) < 1:
self._logger.warn("No phase tensor for the period %s for any MT station", period)
return None
pdf = pd.DataFrame(pt)
self._logger.debug(pdf['period'])
mt_locations = [Point(xy) for xy in zip(pdf['lon'], pdf['lat'])]
geopdf = gpd.GeoDataFrame(pdf, crs=self.orig_crs, geometry=mt_locations)
# points to trace out the polygon-ellipse
theta = np.arange(0, 2 * np.pi, np.pi / 30.)
azimuth = -np.deg2rad(geopdf['azimuth'])
scaling = ellipsize / geopdf['phi_max']
width = geopdf['phi_max'] * scaling
height = geopdf['phi_min'] * scaling
x0 = geopdf['lon']
y0 = geopdf['lat']
# Find invalid ellipses
bad_min = np.where(np.logical_or(geopdf['phi_min'] == 0, geopdf['phi_min'] > 100))[0]
bad_max = np.where(np.logical_or(geopdf['phi_max'] == 0, geopdf['phi_max'] > 100))[0]
dot = 0.0000001 * ellipsize
height[bad_min] = dot
height[bad_max] = dot
width[bad_min] = dot
width[bad_max] = dot
# apply formula to generate ellipses
ellipse_list = []
for i in range(0, len(azimuth)):
x = x0[i] + height[i] * np.cos(theta) * np.cos(azimuth[i]) - \
width[i] * np.sin(theta) * np.sin(azimuth[i])
y = y0[i] + height[i] * np.cos(theta) * np.sin(azimuth[i]) + \
width[i] * np.sin(theta) * np.cos(azimuth[i])
polyg = Polygon(LinearRing([xy for xy in zip(x, y)]))
            # print(polyg)  # an ellipse
ellipse_list.append(polyg)
geopdf = gpd.GeoDataFrame(geopdf, crs=self.orig_crs, geometry=ellipse_list)
if target_epsg_code is None:
self._logger.info("The orginal Geopandas Dataframe CRS: %s", geopdf.crs)
# {'init': 'epsg:4283', 'no_defs': True}
# raise Exception("Must provide a target_epsg_code")
target_epsg_code = geopdf.crs['init'][5:]
else:
geopdf.to_crs(epsg=target_epsg_code, inplace=True)
# world = world.to_crs({'init': 'epsg:3395'})
# world.to_crs(epsg=3395) would also work
path2shp = \
self._export_shapefiles(geopdf, 'Phase_Tensor', target_epsg_code, period, export_fig)
return (geopdf, path2shp)
def create_tipper_real_shp(self, period, line_length=None, target_epsg_code=4283, export_fig=False):
"""
        create real tipper line shapefile for a given MT period
        The shapefile consists of lines without arrowheads.
        Users can use GIS software such as ArcGIS to display the lines and add an arrow at each line's end.
        line_length is the length of each line; it is auto-calculated when not given.
"""
if line_length is None: # auto-calculate the tipper arrow length
line_length = self.stations_distances.get("Q1PERCENT")
self._logger.info("Automatically Selected Max Tipper Length = %s", line_length)
pt = self.get_phase_tensor_tippers(period)
self._logger.debug("phase tensor values =: %s", pt)
if len(pt) < 1:
self._logger.warn("No phase tensor for the period %s for any MT station", period)
return None
pdf = pd.DataFrame(pt)
tip_mag_re_maxval = pdf['tip_mag_re'].max()
if (tip_mag_re_maxval > 0.00000001):
line_length_normalized = line_length / tip_mag_re_maxval
else:
line_length_normalized = line_length
self._logger.debug(pdf['period'])
pdf['tip_re'] = pdf.apply(lambda x:
LineString([(float(x.lon), float(x.lat)),
(float(x.lon) + line_length_normalized * x.tip_mag_re * np.cos(
-np.deg2rad(x.tip_ang_re)),
float(x.lat) + line_length_normalized * x.tip_mag_re * np.sin(
-np.deg2rad(x.tip_ang_re)))]), axis=1)
geopdf = gpd.GeoDataFrame(pdf, crs=self.orig_crs, geometry='tip_re')
if target_epsg_code is None:
self._logger.info("Geopandas Datframe CRS: %s", geopdf.crs)
# {'init': 'epsg:4283', 'no_defs': True}
# raise Exception("Must provide a target_epsg_code")
target_epsg_code = geopdf.crs['init'][5:]
else:
geopdf.to_crs(epsg=target_epsg_code, inplace=True)
# world = world.to_crs({'init': 'epsg:3395'})
# world.to_crs(epsg=3395) would also work
path2shp = \
self._export_shapefiles(geopdf, 'Tipper_Real', target_epsg_code, period, export_fig)
return (geopdf, path2shp)
def create_tipper_imag_shp(self, period, line_length=None, target_epsg_code=4283, export_fig=False):
"""
        create imaginary tipper line shapefile for a given MT period
        The shapefile consists of lines without arrowheads.
        Users can use GIS software such as ArcGIS to display the lines and add an arrow at each line's end.
        line_length is the length of each line; it is auto-calculated when not given.
        :return: (geopdf_obj, path_to_shapefile)
"""
if line_length is None: # auto-calculate the tipper arrow length
line_length = self.stations_distances.get("Q1PERCENT")
self._logger.info("Automatically Selected Max-Tipper Length =: %s", line_length)
pt = self.get_phase_tensor_tippers(period)
self._logger.debug("phase tensor values =: %s", pt)
if len(pt) < 1:
self._logger.warn("No phase tensor for the period %s for any MT station", period)
return None
pdf = pd.DataFrame(pt)
tip_mag_im_maxval = pdf['tip_mag_im'].max()
if (tip_mag_im_maxval > 0.00000001):
line_length_normalized = line_length / tip_mag_im_maxval
else:
line_length_normalized = line_length
self._logger.debug(pdf['period'])
pdf['tip_im'] = pdf.apply(lambda x: LineString([(float(x.lon), float(x.lat)),
(float(x.lon) + line_length_normalized * x.tip_mag_im * np.cos(
-np.deg2rad(x.tip_ang_im)),
float(x.lat) + line_length_normalized * x.tip_mag_im * np.sin(
-np.deg2rad(x.tip_ang_im)))]),
axis=1)
geopdf = gpd.GeoDataFrame(pdf, crs=self.orig_crs, geometry='tip_im')
if target_epsg_code is None:
self._logger.info("Keep the Default/Original Geopandas Dataframe CRS: %s", geopdf.crs)
# {'init': 'epsg:4283', 'no_defs': True}
# raise Exception("Must provide a target_epsg_code")
target_epsg_code = geopdf.crs['init'][5:]
else:
geopdf.to_crs(epsg=target_epsg_code, inplace=True)
# world = world.to_crs({'init': 'epsg:3395'})
# world.to_crs(epsg=3395) would also work
path2shp = \
self._export_shapefiles(geopdf, 'Tipper_Imag', target_epsg_code, period, export_fig)
return (geopdf, path2shp)
def create_tensor_tipper_shapefiles(edi_dir, out_dir, periods,
pt_base_size=None, pt_phi_max=None,
src_epsg=4326, dst_epsg=4326):
"""
Interface for creating and saving phase tensor and tipper
shapefiles.
Parameters
----------
edi_dir : str
Path to directory containing .edi data files.
out_dir : str
        Path to directory to save resulting shapefiles.
src_epsg : int
EPSG code of the EDI data CRS. Defaults 4326 (WGS84).
dst_epsg : int
EPSG code of the output (i.e. same CRS as the geotiff you will
be displaying on). Defaults 4326 (WGS84).
    periods : float or list of float
        List of periods in seconds to create shapefiles for. The nearest
        period to each value will be selected.
"""
if not isinstance(edi_dir, str):
raise TypeError("'edi_dir' must be string containg path to EDI files")
if not isinstance(out_dir, str):
raise TypeError("'out_dir' must be string containing path to output file")
edifiles = recursive_glob(edi_dir)
sfc = ShapefilesCreator(edifiles, out_dir, epsg_code=src_epsg)
all_periods = sfc.all_unique_periods
periods = [periods] if not isinstance(periods, list) else periods
for p in periods:
# Find closest period.
index = np.argmin(np.fabs(np.asarray(all_periods) - p))
nearest = all_periods[index]
_logger.info("Found nearest period {}s for selected period {}s".format(nearest, p))
sfc.create_phase_tensor_shp(all_periods[index], target_epsg_code=dst_epsg,
ellipsize=pt_base_size)
sfc.create_tipper_real_shp(all_periods[index], target_epsg_code=dst_epsg)
sfc.create_tipper_imag_shp(all_periods[index], target_epsg_code=dst_epsg)
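# Hypothetical usage sketch (paths and periods are assumptions for illustration):
#   create_tensor_tipper_shapefiles("/data/edi_files", "/tmp/shp_out",
#                                   periods=[0.01, 1.0, 100.0],
#                                   src_epsg=4326, dst_epsg=28354)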
def plot_phase_tensor_ellipses_and_tippers(edi_dir, out_dir, iperiod=0):
"""
    plot phase tensor ellipses and tippers into one figure.
    :param edi_dir: edi directory
    :param out_dir: directory in which the output figure is saved
:param iperiod: the index of periods
:return: saved figure file
"""
if not isinstance(out_dir, str):
raise TypeError("'out_dir' must be string containing path to output file")
edifiles = recursive_glob(edi_dir)
myobj = ShapefilesCreator(edifiles, out_dir)
allper = myobj.all_unique_periods
gpd_phtensor = myobj.create_phase_tensor_shp(allper[iperiod], export_fig=False)[0]
gpd_retip = myobj.create_tipper_real_shp(allper[iperiod], export_fig=False)[0]
gpd_imtip = myobj.create_tipper_imag_shp(allper[iperiod], export_fig=False)[0]
# composing two layers in a map
f, ax = plt.subplots(1, figsize=(20, 12))
# ax.set_xlim([140.5,141])
# ax.set_ylim([-21,-20])
# Add layer of polygons on the axis
# world.plot(ax=ax, alpha=0.5) # background map
gpd_phtensor.plot(ax=ax, linewidth=2, facecolor='grey', edgecolor='black')
gpd_retip.plot(ax=ax, color='red', linewidth=4)
gpd_imtip.plot(ax=ax, color='blue', linewidth=4)
outfile = os.path.join(out_dir, "phase_tensor_tipper_{}.png".format(iperiod))
if out_dir is not None:
plt.savefig(outfile)
return outfile
####################################################################
# Using geopandas to convert CSV files into shape files
# Refs:
# http://toblerity.org/shapely/manual.html#polygons
# https://geohackweek.github.io/vector/04-geopandas-intro/
# ===================================================================
def create_ellipse_shp_from_csv(csvfile, esize=0.03, target_epsg_code=4283):
"""
create phase tensor ellipse geometry from a csv file. This function needs csv file as its input.
:param csvfile: a csvfile with full path
    :param esize: ellipse size; the default 0.03 (degrees) corresponds to roughly 3 km for the maximum ellipse radius
:return: a geopandas dataframe
"""
# crs = {'init': 'epsg:4326'} # if assume initial crs WGS84
crs = {'init': 'epsg:4283'} # if assume initial crs GDA94
pdf = pd.read_csv(csvfile)
mt_locations = [Point(xy) for xy in zip(pdf['lon'], pdf['lat'])]
# OR pdf['geometry'] = pdf.apply(lambda z: Point(z.lon, z.lat), axis=1)
# if you want to df = df.drop(['Lon', 'Lat'], axis=1)
pdf = gpd.GeoDataFrame(pdf, crs=crs, geometry=mt_locations)
# make pt_ellispes using polygons
phi_max_v = pdf['phi_max'].max() # the max of this group of ellipse
# points to trace out the polygon-ellipse
theta = np.arange(0, 2 * np.pi, np.pi / 30.)
azimuth = -np.deg2rad(pdf['azimuth'])
width = esize * (pdf['phi_max'] / phi_max_v)
height = esize * (pdf['phi_min'] / phi_max_v)
x0 = pdf['lon']
y0 = pdf['lat']
# apply formula to generate ellipses
ellipse_list = []
for i in range(0, len(azimuth)):
x = x0[i] + height[i] * np.cos(theta) * np.cos(azimuth[i]) - \
width[i] * np.sin(theta) * np.sin(azimuth[i])
y = y0[i] + height[i] * np.cos(theta) * np.sin(azimuth[i]) + \
width[i] * np.sin(theta) * np.cos(azimuth[i])
polyg = Polygon(LinearRing([xy for xy in zip(x, y)]))
        # print(polyg)  # an ellipse
ellipse_list.append(polyg)
pdf = gpd.GeoDataFrame(pdf, crs=crs, geometry=ellipse_list)
if target_epsg_code is None:
raise Exception("Must provide a target_epsg_code")
else:
pdf.to_crs(epsg=target_epsg_code, inplace=True)
# world = world.to_crs({'init': 'epsg:3395'})
# world.to_crs(epsg=3395) would also work
# to shape file
shp_fname = csvfile.replace('.csv', '_ellip_epsg%s.shp' % target_epsg_code)
pdf.to_file(shp_fname, driver='ESRI Shapefile')
return pdf
def create_tipper_real_shp_from_csv(csvfile, line_length=0.03, target_epsg_code=4283):
""" create tipper lines shape from a csv file. This function needs csv file as its input.
The shape is a line without arrow.
Must use a GIS software such as ArcGIS to display and add an arrow at each line's end
line_length=4 how long will be the line (arrow)
return: a geopandas dataframe object for further processing.
"""
# crs = {'init': 'epsg:4326'} # if assume initial crs WGS84
crs = {'init': 'epsg:4283'} # if assume initial crs GDA94
pdf = pd.read_csv(csvfile)
# mt_locations = [Point(xy) for xy in zip(pdf.lon, pdf.lat)]
# OR pdf['geometry'] = pdf.apply(lambda z: Point(z.lon, z.lat), axis=1)
# if you want to df = df.drop(['Lon', 'Lat'], axis=1)
# geo_df = gpd.GeoDataFrame(pdf, crs=crs, geometry=mt_locations)
pdf['tip_re'] = pdf.apply(lambda x:
LineString([(float(x.lon), float(x.lat)),
(float(x.lon) + line_length * x.tip_mag_re * np.cos(
-np.deg2rad(x.tip_ang_re)),
float(x.lat) + line_length * x.tip_mag_re * np.sin(
-np.deg2rad(x.tip_ang_re)))]), axis=1)
pdf = gpd.GeoDataFrame(pdf, crs=crs, geometry='tip_re')
if target_epsg_code is None:
raise Exception("Must provide a target_epsg_code")
else:
pdf.to_crs(epsg=target_epsg_code, inplace=True)
# world = world.to_crs({'init': 'epsg:3395'})
# world.to_crs(epsg=3395) would also work
# to shape file
shp_fname = csvfile.replace('.csv', '_real_epsg%s.shp' % target_epsg_code)
pdf.to_file(shp_fname, driver='ESRI Shapefile')
return pdf
def create_tipper_imag_shp_from_csv(csvfile, line_length=0.03, target_epsg_code=4283):
""" create imagery tipper lines shape from a csv file. this function needs csv file as input.
The shape is a line without arrow.
Must use a GIS software such as ArcGIS to display and add an arrow at each line's end
line_length=4 how long will be the line (arrow)
return: a geopandas dataframe object for further processing.
"""
# crs = {'init': 'epsg:4326'} # if assume initial crs WGS84
crs = {'init': 'epsg:4283'} # if assume initial crs GDA94
pdf = pd.read_csv(csvfile)
# mt_locations = [Point(xy) for xy in zip(pdf.lon, pdf.lat)]
# OR pdf['geometry'] = pdf.apply(lambda z: Point(z.lon, z.lat), axis=1)
# if you want to df = df.drop(['Lon', 'Lat'], axis=1)
# geo_df = gpd.GeoDataFrame(pdf, crs=crs, geometry=mt_locations)
pdf['tip_im'] = pdf.apply(lambda x:
LineString([(float(x.lon), float(x.lat)),
(float(x.lon) + line_length * x.tip_mag_im * np.cos(
-np.deg2rad(x.tip_ang_im)),
float(x.lat) + line_length * x.tip_mag_im * np.sin(
-np.deg2rad(x.tip_ang_im)))]), axis=1)
pdf = gpd.GeoDataFrame(pdf, crs=crs, geometry='tip_im')
if target_epsg_code is None:
raise Exception("Must provide a target_epsg_code") # EDI original lat/lon epsg 4326 or GDA94
else:
pdf.to_crs(epsg=target_epsg_code, inplace=True)
# world = world.to_crs({'init': 'epsg:3395'})
# world.to_crs(epsg=3395) would also work
# to shape file
shp_fname = csvfile.replace('.csv', '_imag_epsg%s.shp' % target_epsg_code)
pdf.to_file(shp_fname, driver='ESRI Shapefile')
return pdf
def export_geopdf_to_image(geopdf, bbox, jpg_file_name, target_epsg_code=None, colorby=None, colormap=None,
showfig=False):
"""
    Export a geopandas dataframe to a jpeg file, optionally re-projected to a new epsg code.
    :param geopdf: a geopandas dataframe
    :param bbox: ensures a consistent display area, defined by a dict with 4 keys
    [MinLat, MinLon, MaxLat, MaxLon], covering all ground stations, not just this period-dependent geopdf
    :param jpg_file_name: path to the output jpeg
:param target_epsg_code: 4326 etc
:param showfig: If True, then display fig on screen.
:return:
"""
if target_epsg_code is None:
p = geopdf
        # target_epsg_code = '4283' # EDI original lat/lon epsg 4326=WGS84 or 4283=GDA94
target_epsg_code = geopdf.crs['init'][5:]
else:
p = geopdf.to_crs(epsg=target_epsg_code)
# world = world.to_crs({'init': 'epsg:3395'})
# world.to_crs(epsg=3395) would also work
# bounds = p.total_bounds # lat-lon bounds for this csv dataframe
# plot and save
fig_title = os.path.basename(jpg_file_name)
_logger.info('saving figure to file %s', jpg_file_name)
if colorby is None:
colorby = 'phi_min'
else:
colorby = colorby
if colormap is None:
my_colormap = mpl.cm.gist_ncar # a default choice: jet_r #'jet'
else:
my_colormap = colormap
if int(target_epsg_code) == 4326 or int(target_epsg_code) == 4283:
myax = p.plot(figsize=[10, 10], linewidth=2.0, column=colorby, cmap=my_colormap) # , marker='o', markersize=10)
# add colorbar
divider = make_axes_locatable(myax)
# pad = separation from figure to colorbar
cax = divider.append_axes("right", size="3%", pad=0.2)
fig = myax.get_figure()
sm = plt.cm.ScalarMappable(cmap=my_colormap) # , norm=plt.Normalize(vmin=vmin, vmax=vmax))
# fake up the array of the scalar mappable. Urgh...
sm._A = p[colorby] # [1,2,3]
cb = fig.colorbar(sm, cax=cax, orientation='vertical')
cb.set_label(colorby, fontdict={'size': 15, 'weight': 'bold'})
# myax = p.plot(figsize=[10, 8], linewidth=2.0, column='phi_max', cmap='jet') # , vmin=vmin, vmax=vmax)
# calculate and set xy limit:
# myax.set_xlim([140.2, 141.2]) #LieJunWang
# myax.set_ylim([-20.8, -19.9])
# myax.set_xlim([140, 150]) # GA-Vic
# myax.set_ylim([-39, -34])
#
# myax.set_xlim([136.7, 137.0]) # 3D_MT_data_
# myax.set_ylim([-20.65, -20.35])
#
# myax.set_xlim([140.0, 144.5]) # WPJ
# myax.set_ylim([-23.5, -19.0])
# automatically adjust plot xy-scope
margin = 0.02 # degree
margin = 0.05 * (bbox['MaxLon'] - bbox['MinLon'] + bbox['MaxLat'] - bbox['MinLat'])
myax.set_xlim((bbox['MinLon'] - margin, bbox['MaxLon'] + margin))
myax.set_ylim((bbox['MinLat'] - margin, bbox['MaxLat'] + margin))
myax.set_xlabel('Longitude')
myax.set_ylabel('Latitude')
myax.set_title(fig_title)
else: # UTM kilometer units
myax = p.plot(figsize=[10, 8], linewidth=2.0, column=colorby,
cmap=my_colormap) # simple plot need to have details added
myax.set_xlabel('East-West (KM)')
myax.set_ylabel('North-South (KM)')
myax.set_title(fig_title)
# myax.set_xlim([400000, 1300000])
# myax.set_ylim([5700000, 6200000])
#
# myax.set_xlim([400000, 900000])
# myax.set_ylim([7400000, 7900000])
# automatically adjust plot xy-scope
# margin = 2000 # meters
margin = 0.05 * (bbox['MaxLon'] - bbox['MinLon'] + bbox['MaxLat'] - bbox['MinLat'])
myax.set_xlim((bbox['MinLon'] - margin, bbox['MaxLon'] + margin))
myax.set_ylim((bbox['MinLat'] - margin, bbox['MaxLat'] + margin))
xticks = myax.get_xticks() / 1000
myax.set_xticklabels(xticks)
yticks = myax.get_yticks() / 1000
myax.set_yticklabels(yticks)
# add colorbar
divider = make_axes_locatable(myax)
# pad = separation from figure to colorbar
cax = divider.append_axes("right", size="3%", pad=0.2)
fig = myax.get_figure()
sm = plt.cm.ScalarMappable(cmap=my_colormap) # , norm=plt.Normalize(vmin=vmin, vmax=vmax))
# fake up the array of the scalar mappable. Urgh...
sm._A = p[colorby] # [1,2,3]
cb = fig.colorbar(sm, cax=cax, orientation='vertical')
cb.set_label(colorby, fontdict={'size': 15, 'weight': 'bold'})
fig = plt.gcf()
fig.savefig(jpg_file_name, dpi=400)
if showfig is True:
plt.show()
# cleanup memory now
plt.close() # this will make prog faster and not too many plot obj kept.
del (p)
del (geopdf)
del (fig)
def process_csv_folder(csv_folder, bbox_dict, target_epsg_code=4283):
"""
    process all *.csv files in a dir, using target_epsg_code=4283 (GDA94) as the default.
This function uses csv-files folder as its input.
:param csv_folder:
:return:
"""
if csv_folder is None:
_logger.critical("Must provide a csv folder")
csvfiles = glob.glob(csv_folder + '/*Hz.csv') # phase_tensor_tipper_0.004578Hz.csv
# for acsv in csvfiles[:2]:
for acsv in csvfiles:
tip_re_gdf = create_tipper_real_shp_from_csv(acsv, line_length=0.02, target_epsg_code=target_epsg_code)
my_gdf = tip_re_gdf
jpg_file_name = acsv.replace('.csv', '_tip_re_epsg%s.jpg' % target_epsg_code)
export_geopdf_to_image(my_gdf, bbox_dict, jpg_file_name, target_epsg_code)
tip_im_gdf = create_tipper_imag_shp_from_csv(acsv, line_length=0.02, target_epsg_code=target_epsg_code)
my_gdf = tip_im_gdf
jpg_file_name = acsv.replace('.csv', '_tip_im_epsg%s.jpg' % target_epsg_code)
export_geopdf_to_image(my_gdf, bbox_dict, jpg_file_name, target_epsg_code)
ellip_gdf = create_ellipse_shp_from_csv(acsv, esize=0.01, target_epsg_code=target_epsg_code)
# Now, visualize and output to image file from the geopandas dataframe
my_gdf = ellip_gdf
jpg_file_name = acsv.replace('.csv', '_ellips_epsg%s.jpg' % target_epsg_code)
export_geopdf_to_image(my_gdf, bbox_dict, jpg_file_name, target_epsg_code)
return
#############################################################################
# ==================================================================
# python mtpy/utils/shapefiles_creator.py data/edifiles /e/tmp
# ==================================================================
#############################################################################
if __name__ == "__main__OLD_V0":
edidir = sys.argv[1]
edifiles = glob.glob(os.path.join(edidir, "*.edi"))
if len(sys.argv) > 2:
path2out = sys.argv[2]
else:
path2out = None
# filter the edi files here if desired, to get a subset:
# edifiles2 = edifiles[0:-1:2]
shp_maker = ShapefilesCreator(edifiles, path2out)
# ptdic = shp_maker.create_csv_files_deprecated() # dest_dir=path2out) # create csv files E:/temp1
ptdic = shp_maker.create_phase_tensor_csv(path2out) # compare csv in E:/temp2
# print ptdic
# print ptdic[ptdic.keys()[0]]
# edisobj = mtpy.core.edi_collection.EdiCollection(edifiles)
edisobj = EdiCollection(edifiles)
bbox_dict = edisobj.bound_box_dict
print(bbox_dict)
bbox_dict2 = shp_maker.bound_box_dict
print(bbox_dict2)
if bbox_dict != bbox_dict2:
raise Exception("parent-child's attribute bbo_dic not equal!!!")
# create shapefiles and plots
# epsg projection 4283 - gda94
# process_csv_folder(path2out, bbox_dict)
# Further testing epsg codes:
# epsg projection 28354 - gda94 / mga zone 54
# epsg projection 32754 - wgs84 / utm zone 54s
# GDA94/GALCC =3112
for my_epsgcode in [3112, ]: # [4326, 4283, 3112, 32755]: # 32754, 28355]:
bbox_dict = edisobj.get_bounding_box(epsgcode=my_epsgcode)
print(bbox_dict)
process_csv_folder(path2out, bbox_dict, target_epsg_code=my_epsgcode)
###################################################################
# Example codes to use the ShapeFilesCreator class - new version
if __name__ == "__main__d":
edidir = sys.argv[1]
edifiles = glob.glob(os.path.join(edidir, "*.edi"))
if len(sys.argv) > 2:
path2out = sys.argv[2]
else:
path2out = None
# esize=0.08 # specify ellipse size ?
# Filter the edi files to get a subset:
everysite = 1 # every 1,2,3,4, 5
edi_list = edifiles[::everysite] # subset of the edi files
shp_maker = ShapefilesCreator(edi_list, path2out)
station_distance_stats = shp_maker.get_stations_distances_stats()
esize = None # if None, auto selected default in the method
tipsize = None # if None, auto selected default in the method
_logger.info("User-defined Max-Ellispse Size =:%s", esize)
_logger.info("User-defined Max-Tipper Length/Size =:%s", tipsize)
    shp_maker.create_phase_tensor_shp(999.99, ellipsize=esize)  # nothing created for a non-existent period
min_period = shp_maker.all_unique_periods[0]
max_period = shp_maker.all_unique_periods[-1]
# for aper in [min_period, max_period]:
for aper in shp_maker.all_unique_periods[::5]: # ascending order: from short to long periods
# default projection as target output
# shp_maker.create_phase_tensor_shp(2.85)
# shp_maker.create_phase_tensor_shp(aper, ellipsize=esize,export_fig=True)
# shp_maker.create_tipper_real_shp(aper, line_length=tipsize, export_fig=True)
# shp_maker.create_tipper_imag_shp(aper, line_length=tipsize, export_fig=True)
for my_epsgcode in [3112]: # [3112, 4326, 4283, 32754, 32755, 28353, 28354, 28355]:
shp_maker.create_phase_tensor_shp(aper, target_epsg_code=my_epsgcode, ellipsize=esize, export_fig=True)
shp_maker.create_tipper_real_shp(aper, line_length=tipsize, target_epsg_code=my_epsgcode, export_fig=True)
shp_maker.create_tipper_imag_shp(aper, line_length=tipsize, target_epsg_code=my_epsgcode, export_fig=True)
# ===================================================
# Click Command Wrapper for shape files from edi
# ===================================================
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option('-i', '--input', type=str,
default='examples/data/edi_files_2',
help='input edi files dir ')
@click.option('-c', '--code', type=int, default=3112,
help='epsg code [3112, 4326, 4283, 32754, 32755, 28353, 28354, 28355]')
@click.option('-o', '--output', type=str, default="temp", help='Output directory')
def generate_shape_files(input, output, code):
print("=======================================================================")
print("Generating Shapes File requires following inputs edi files directory ")
print("Default epsg code 3112 ")
print(" epsg_code(4326, 4283, 32754, 32755, 28353, 28354, 28355) ")
print("Default output is in temp directory ")
print("=======================================================================")
if not os.path.isdir(output):
os.mkdir(output)
edifiles = glob.glob(os.path.join(input, "*.edi"))
# Filter the edi files to get a subset:
everysite = 1 # every 1,2,3,4, 5
edi_list = edifiles[::everysite] # subset of the edi files
shp_maker = ShapefilesCreator(edi_list, output)
# station_distance_stats= shp_maker.get_stations_distances_stats()
esize = None # if None, auto selected default in the method
tipsize = None # if None, auto selected default in the method
    shp_maker.create_phase_tensor_shp(999.99, ellipsize=esize)  # nothing created for a non-existent period
# min_period = shp_maker.all_unique_periods[0]
# max_period = shp_maker.all_unique_periods[-1]
# for aper in [min_period, max_period]:
for aper in shp_maker.all_unique_periods[::5]: # ascending order: from short to long periods
# default projection as target output
# shp_maker.create_phase_tensor_shp(2.85)
# shp_maker.create_phase_tensor_shp(aper, ellipsize=esize,export_fig=True)
# shp_maker.create_tipper_real_shp(aper, line_length=tipsize, export_fig=True)
# shp_maker.create_tipper_imag_shp(aper, line_length=tipsize, export_fig=True)
for my_epsgcode in [code]: # [3112, 4326, 4283, 32754, 32755, 28353, 28354, 28355]:
shp_maker.create_phase_tensor_shp(aper, target_epsg_code=my_epsgcode, ellipsize=esize, export_fig=True)
shp_maker.create_tipper_real_shp(aper, line_length=tipsize, target_epsg_code=my_epsgcode, export_fig=True)
shp_maker.create_tipper_imag_shp(aper, line_length=tipsize, target_epsg_code=my_epsgcode, export_fig=True)
if __name__ == "__main__":
print("Please see examples/scripts/create_pt_shapefiles.py")
# generate_shape_files() # click CLI interface
| gpl-3.0 |
btabibian/scikit-learn | sklearn/externals/joblib/parallel.py | 24 | 33170 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
from contextlib import contextmanager
import warnings
try:
import cPickle as pickle
except ImportError:
import pickle
from ._multiprocessing_helpers import mp
from .format_stack import format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_bytes
from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
ThreadingBackend, SequentialBackend)
from ._compat import _basestring
# Make sure that those two classes are part of the public joblib.parallel API
# so that 3rd party backend implementers can import them from here.
from ._parallel_backends import AutoBatchingMixin # noqa
from ._parallel_backends import ParallelBackendBase # noqa
BACKENDS = {
'multiprocessing': MultiprocessingBackend,
'threading': ThreadingBackend,
'sequential': SequentialBackend,
}
# name of the backend used by default by Parallel outside of any context
# managed by ``parallel_backend``.
DEFAULT_BACKEND = 'multiprocessing'
DEFAULT_N_JOBS = 1
# Thread local value that can be overriden by the ``parallel_backend`` context
# manager
_backend = threading.local()
def get_active_backend():
"""Return the active default backend"""
active_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
if active_backend_and_jobs is not None:
return active_backend_and_jobs
# We are outside of the scope of any parallel_backend context manager,
# create the default backend instance now
active_backend = BACKENDS[DEFAULT_BACKEND]()
return active_backend, DEFAULT_N_JOBS
@contextmanager
def parallel_backend(backend, n_jobs=-1, **backend_params):
"""Change the default backend used by Parallel inside a with block.
If ``backend`` is a string it must match a previously registered
implementation using the ``register_parallel_backend`` function.
Alternatively backend can be passed directly as an instance.
By default all available workers will be used (``n_jobs=-1``) unless the
caller passes an explicit value for the ``n_jobs`` parameter.
This is an alternative to passing a ``backend='backend_name'`` argument to
the ``Parallel`` class constructor. It is particularly useful when calling
into library code that uses joblib internally but does not expose the
backend argument in its own API.
>>> from operator import neg
>>> with parallel_backend('threading'):
... print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
...
[-1, -2, -3, -4, -5]
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
if isinstance(backend, _basestring):
backend = BACKENDS[backend](**backend_params)
old_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
try:
_backend.backend_and_jobs = (backend, n_jobs)
# return the backend instance to make it easier to write tests
yield backend, n_jobs
finally:
if old_backend_and_jobs is None:
if getattr(_backend, 'backend_and_jobs', None) is not None:
del _backend.backend_and_jobs
else:
_backend.backend_and_jobs = old_backend_and_jobs
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instantiation overhead.
if hasattr(mp, 'get_context'):
method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
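# Illustration only (not part of joblib): a BatchedCalls object simply runs its
# (func, args, kwargs) items in order when called, e.g.
#   batch = BatchedCalls([(abs, (-1,), {}), (abs, (-2,), {})])
#   batch()      # -> [1, 2]
#   len(batch)   # -> 2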
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
"""Return the number of CPUs."""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
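# Illustration only (not part of joblib): delayed just captures the call, e.g.
#   from math import sqrt
#   delayed(sqrt)(4)   # -> (sqrt, (4,), {})
# which Parallel later unpacks and executes inside a worker.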
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
self.parallel._backend.batch_completed(self.batch_size,
this_batch_duration)
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
def register_parallel_backend(name, factory, make_default=False):
"""Register a new Parallel backend factory.
The new backend can then be selected by passing its name as the backend
argument to the Parallel class. Moreover, the default backend can be
overwritten globally by setting make_default=True.
    The factory can be any callable that takes no argument and returns an
    instance of ``ParallelBackendBase``.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
BACKENDS[name] = factory
if make_default:
global DEFAULT_BACKEND
DEFAULT_BACKEND = name
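# Illustrative sketch added for clarity (not part of the original joblib
# source): any zero-argument callable returning a ParallelBackendBase instance
# can be registered as a factory. Here we simply reuse the factory already
# stored in BACKENDS under an arbitrary alias name, 'low_overhead'.
def _example_register_backend_alias():
    """Register 'low_overhead' as another name for the threading backend."""
    register_parallel_backend('low_overhead', BACKENDS['threading'])
    with parallel_backend('low_overhead', n_jobs=2):
        return Parallel()(delayed(len)(s) for s in ('a', 'bc', 'def'))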
def effective_n_jobs(n_jobs=-1):
"""Determine the number of jobs that can actually run in parallel
    n_jobs is the number of workers requested by the caller.
    Passing n_jobs=-1 means requesting all available workers, for instance
    matching the number of CPU cores on the worker host(s).
This method should return a guesstimate of the number of workers that can
actually perform work concurrently with the currently enabled default
backend. The primary use case is to make it possible for the caller to know
in how many chunks to slice the work.
In general working on larger data chunks is more efficient (less
scheduling overhead and better use of CPU cache prefetching heuristics)
as long as all the workers have enough work to do.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
backend, _ = get_active_backend()
return backend.effective_n_jobs(n_jobs=n_jobs)
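# Illustrative sketch added for clarity (not part of the original joblib
# source): the typical use of ``effective_n_jobs`` is to pick a chunk count
# before slicing the work, as the docstring above suggests. The chunking
# scheme below is an arbitrary example.
def _example_chunking(items):
    """Split ``items`` into roughly one chunk per available worker."""
    n_chunks = max(effective_n_jobs(-1), 1)
    chunk_size = max(len(items) // n_chunks, 1)
    return [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]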
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str, ParallelBackendBase instance or None, \
default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
- finally, you can register backends by calling
register_parallel_backend. This will allow you to implement
a backend of your liking.
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
timeout: float, optional
        Timeout limit for each task to complete. If any task takes longer,
        a TimeOutError will be raised. Only applied when n_jobs != 1.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
        Folder to be used by the pool for memmapping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment
variable,
- /dev/shm if the folder exists and is writable: this is a
RAMdisk filesystem available by default on modern Linux
distributions,
- the default system temporary folder that can be
overridden with TMP, TMPDIR or TEMP environment
variables, typically /tmp under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
        in bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
        Use None to disable memmapping of large arrays.
Only active when backend="multiprocessing".
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
Memmapping mode for numpy arrays passed to workers.
See 'max_nbytes' parameter documentation for more details.
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
          constructing lists of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
            - using 'n_jobs=1' makes it possible to turn off parallel computing
              for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
        * Interruption of multiprocessing jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages:
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process:
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend=None, verbose=0, timeout=None,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
active_backend, default_n_jobs = get_active_backend()
if backend is None and n_jobs == 1:
# If we are under a parallel_backend context manager, look up
# the default number of jobs and use that instead:
n_jobs = default_n_jobs
self.n_jobs = n_jobs
self.verbose = verbose
self.timeout = timeout
self.pre_dispatch = pre_dispatch
if isinstance(max_nbytes, _basestring):
max_nbytes = memstr_to_bytes(max_nbytes)
self._backend_args = dict(
max_nbytes=max_nbytes,
mmap_mode=mmap_mode,
temp_folder=temp_folder,
verbose=max(0, self.verbose - 50),
)
if DEFAULT_MP_CONTEXT is not None:
self._backend_args['context'] = DEFAULT_MP_CONTEXT
if backend is None:
backend = active_backend
elif isinstance(backend, ParallelBackendBase):
# Use provided backend as is
pass
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._backend_args['context'] = backend
backend = MultiprocessingBackend()
else:
try:
backend_factory = BACKENDS[backend]
except KeyError:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, sorted(BACKENDS.keys())))
backend = backend_factory()
if (batch_size == 'auto' or isinstance(batch_size, Integral) and
batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self._backend = backend
self._output = None
self._jobs = list()
self._managed_backend = False
        # This lock is used to coordinate the main thread of this process with
        # the async callback thread of the worker pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_backend = True
self._initialize_backend()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_backend()
self._managed_backend = False
def _initialize_backend(self):
"""Build a process or thread pool and return the number of workers"""
try:
n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
**self._backend_args)
if self.timeout is not None and not self._backend.supports_timeout:
warnings.warn(
'The backend class {!r} does not support timeout. '
"You have set 'timeout={}' in Parallel but "
"the 'timeout' parameter will not be used.".format(
self._backend.__class__.__name__,
self.timeout))
except FallbackToBackend as e:
# Recursively initialize the backend in case of requested fallback.
self._backend = e.backend
n_jobs = self._initialize_backend()
return n_jobs
def _effective_n_jobs(self):
if self._backend:
return self._backend.effective_n_jobs(self.n_jobs)
return 1
def _terminate_backend(self):
if self._backend is not None:
self._backend.terminate()
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
        WARNING: this method is not thread-safe: it should only be called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._backend.apply_async(batch, callback=cb)
self._jobs.append(job)
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto':
batch_size = self._backend.compute_batch_size()
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if len(tasks) == 0:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# Original job iterator becomes None once it has been fully
        # consumed: at this point we know the total number of jobs and we are
# able to display an estimation of the remaining time based on already
# completed jobs. Otherwise, we simply display the number of completed
# tasks.
if self._original_iterator is not None:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time), ))
else:
index = self.n_completed_tasks
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1 -
self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / index) * \
(self.n_dispatched_tasks - index * 1.0)
# only display status if remaining time is greater or equal to 0
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
            # we empty it, and Python lists are not thread-safe by default,
            # hence the use of the lock.
with self._lock:
job = self._jobs.pop(0)
try:
if getattr(self._backend, 'supports_timeout', False):
self._output.extend(job.get(timeout=self.timeout))
else:
self._output.extend(job.get())
except BaseException as exception:
# Note: we catch any BaseException instead of just Exception
# instances to also include KeyboardInterrupt.
# Stop dispatching any new job in the async callback thread
self._aborting = True
# If the backend allows it, cancel or kill remaining running
# tasks without waiting for the results as we will raise
# the exception we got back to the caller instead of returning
# any result.
backend = self._backend
if (backend is not None and
hasattr(backend, 'abort_everything')):
# If the backend is managed externally we need to make sure
# to leave it in a working state to allow for future jobs
# scheduling.
ensure_ready = self._managed_backend
backend.abort_everything(ensure_ready=ensure_ready)
if not isinstance(exception, TransportableException):
raise
else:
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_backend:
n_jobs = self._initialize_backend()
else:
n_jobs = self._effective_n_jobs()
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
try:
# Only set self._iterating to True if at least a batch
# was dispatched. In particular this covers the edge
# case of Parallel used with an exhausted iterator.
while self.dispatch_one_batch(iterator):
self._iterating = True
else:
self._iterating = False
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_backend:
self._terminate_backend()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
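# Illustrative sketch added for clarity (not part of the original joblib
# source): because Parallel defines __enter__/__exit__ above, it can also be
# used as a context manager so that several calls reuse the same worker pool
# instead of re-creating it each time. The inputs below are arbitrary.
def _example_parallel_context_manager():
    """Reuse one pool of 2 workers for two consecutive parallel maps."""
    from operator import neg  # a picklable callable, unlike a lambda
    with Parallel(n_jobs=2) as parallel:
        negated = parallel(delayed(neg)(i) for i in range(3))
        magnitudes = parallel(delayed(abs)(i) for i in range(-3, 0))
    return negated, magnitudes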
| bsd-3-clause |
HolgerPeters/scikit-learn | sklearn/mixture/tests/test_gaussian_mixture.py | 26 | 40216 | # Author: Wei Xue <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import sys
import warnings
import numpy as np
from scipy import stats, linalg
from sklearn.covariance import EmpiricalCovariance
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.mixture.gaussian_mixture import GaussianMixture
from sklearn.mixture.gaussian_mixture import (
_estimate_gaussian_covariances_full,
_estimate_gaussian_covariances_tied,
_estimate_gaussian_covariances_diag,
_estimate_gaussian_covariances_spherical)
from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky
from sklearn.mixture.gaussian_mixture import _compute_log_det_cholesky
from sklearn.exceptions import ConvergenceWarning, NotFittedError
from sklearn.utils.extmath import fast_logdet
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
def generate_data(n_samples, n_features, weights, means, precisions,
covariance_type):
rng = np.random.RandomState(0)
X = []
if covariance_type == 'spherical':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['spherical'])):
X.append(rng.multivariate_normal(m, c * np.eye(n_features),
int(np.round(w * n_samples))))
if covariance_type == 'diag':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['diag'])):
X.append(rng.multivariate_normal(m, np.diag(c),
int(np.round(w * n_samples))))
if covariance_type == 'tied':
for _, (w, m) in enumerate(zip(weights, means)):
X.append(rng.multivariate_normal(m, precisions['tied'],
int(np.round(w * n_samples))))
if covariance_type == 'full':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['full'])):
X.append(rng.multivariate_normal(m, c,
int(np.round(w * n_samples))))
X = np.vstack(X)
return X
class RandomData(object):
def __init__(self, rng, n_samples=500, n_components=2, n_features=2,
scale=50):
self.n_samples = n_samples
self.n_components = n_components
self.n_features = n_features
self.weights = rng.rand(n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.rand(n_components, n_features) * scale
self.covariances = {
'spherical': .5 + rng.rand(n_components),
'diag': (.5 + rng.rand(n_components, n_features)) ** 2,
'tied': make_spd_matrix(n_features, random_state=rng),
'full': np.array([
make_spd_matrix(n_features, random_state=rng) * .5
for _ in range(n_components)])}
self.precisions = {
'spherical': 1. / self.covariances['spherical'],
'diag': 1. / self.covariances['diag'],
'tied': linalg.inv(self.covariances['tied']),
'full': np.array([linalg.inv(covariance)
for covariance in self.covariances['full']])}
self.X = dict(zip(COVARIANCE_TYPE, [generate_data(
n_samples, n_features, self.weights, self.means, self.covariances,
covar_type) for covar_type in COVARIANCE_TYPE]))
self.Y = np.hstack([k * np.ones(int(np.round(w * n_samples)))
for k, w in enumerate(self.weights)])
def test_gaussian_mixture_attributes():
# test bad parameters
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
n_components_bad = 0
gmm = GaussianMixture(n_components=n_components_bad)
assert_raise_message(ValueError,
"Invalid value for 'n_components': %d "
"Estimation requires at least one component"
% n_components_bad, gmm.fit, X)
# covariance_type should be in [spherical, diag, tied, full]
covariance_type_bad = 'bad_covariance_type'
gmm = GaussianMixture(covariance_type=covariance_type_bad)
assert_raise_message(ValueError,
"Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% covariance_type_bad,
gmm.fit, X)
tol_bad = -1
gmm = GaussianMixture(tol=tol_bad)
assert_raise_message(ValueError,
"Invalid value for 'tol': %.5f "
"Tolerance used by the EM must be non-negative"
% tol_bad, gmm.fit, X)
reg_covar_bad = -1
gmm = GaussianMixture(reg_covar=reg_covar_bad)
assert_raise_message(ValueError,
"Invalid value for 'reg_covar': %.5f "
"regularization on covariance must be "
"non-negative" % reg_covar_bad, gmm.fit, X)
max_iter_bad = 0
gmm = GaussianMixture(max_iter=max_iter_bad)
assert_raise_message(ValueError,
"Invalid value for 'max_iter': %d "
"Estimation requires at least one iteration"
% max_iter_bad, gmm.fit, X)
n_init_bad = 0
gmm = GaussianMixture(n_init=n_init_bad)
assert_raise_message(ValueError,
"Invalid value for 'n_init': %d "
"Estimation requires at least one run"
% n_init_bad, gmm.fit, X)
init_params_bad = 'bad_method'
gmm = GaussianMixture(init_params=init_params_bad)
assert_raise_message(ValueError,
"Unimplemented initialization method '%s'"
% init_params_bad,
gmm.fit, X)
# test good parameters
n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1
covariance_type, init_params = 'full', 'random'
gmm = GaussianMixture(n_components=n_components, tol=tol, n_init=n_init,
max_iter=max_iter, reg_covar=reg_covar,
covariance_type=covariance_type,
init_params=init_params).fit(X)
assert_equal(gmm.n_components, n_components)
assert_equal(gmm.covariance_type, covariance_type)
assert_equal(gmm.tol, tol)
assert_equal(gmm.reg_covar, reg_covar)
assert_equal(gmm.max_iter, max_iter)
assert_equal(gmm.n_init, n_init)
assert_equal(gmm.init_params, init_params)
def test_check_X():
from sklearn.mixture.base import _check_X
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 2, 2
X_bad_dim = rng.rand(n_components - 1, n_features)
assert_raise_message(ValueError,
'Expected n_samples >= n_components '
'but got n_components = %d, n_samples = %d'
% (n_components, X_bad_dim.shape[0]),
_check_X, X_bad_dim, n_components)
X_bad_dim = rng.rand(n_components, n_features + 1)
assert_raise_message(ValueError,
'Expected the input data X have %d features, '
'but got %d features'
% (n_features, X_bad_dim.shape[1]),
_check_X, X_bad_dim, n_components, n_features)
X = rng.rand(n_samples, n_features)
assert_array_equal(X, _check_X(X, n_components, n_features))
def test_check_weights():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check bad shape
weights_bad_shape = rng.rand(n_components, 1)
g.weights_init = weights_bad_shape
assert_raise_message(ValueError,
"The parameter 'weights' should have the shape of "
"(%d,), but got %s" %
(n_components, str(weights_bad_shape.shape)),
g.fit, X)
# Check bad range
weights_bad_range = rng.rand(n_components) + 1
g.weights_init = weights_bad_range
assert_raise_message(ValueError,
"The parameter 'weights' should be in the range "
"[0, 1], but got max value %.5f, min value %.5f"
% (np.min(weights_bad_range),
np.max(weights_bad_range)),
g.fit, X)
# Check bad normalization
weights_bad_norm = rng.rand(n_components)
weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1)
g.weights_init = weights_bad_norm
assert_raise_message(ValueError,
"The parameter 'weights' should be normalized, "
"but got sum(weights) = %.5f"
% np.sum(weights_bad_norm),
g.fit, X)
# Check good weights matrix
weights = rand_data.weights
g = GaussianMixture(weights_init=weights, n_components=n_components)
g.fit(X)
assert_array_equal(weights, g.weights_init)
def test_check_means():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check means bad shape
means_bad_shape = rng.rand(n_components + 1, n_features)
g.means_init = means_bad_shape
assert_raise_message(ValueError,
"The parameter 'means' should have the shape of ",
g.fit, X)
# Check good means matrix
means = rand_data.means
g.means_init = means
g.fit(X)
assert_array_equal(means, g.means_init)
def test_check_precisions():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
# Define the bad precisions for each covariance_type
precisions_bad_shape = {
'full': np.ones((n_components + 1, n_features, n_features)),
'tied': np.ones((n_features + 1, n_features + 1)),
'diag': np.ones((n_components + 1, n_features)),
'spherical': np.ones((n_components + 1))}
# Define not positive-definite precisions
precisions_not_pos = np.ones((n_components, n_features, n_features))
precisions_not_pos[0] = np.eye(n_features)
precisions_not_pos[0, 0, 0] = -1.
precisions_not_positive = {
'full': precisions_not_pos,
'tied': precisions_not_pos[0],
'diag': -1. * np.ones((n_components, n_features)),
'spherical': -1. * np.ones(n_components)}
not_positive_errors = {
'full': 'symmetric, positive-definite',
'tied': 'symmetric, positive-definite',
'diag': 'positive',
'spherical': 'positive'}
for covar_type in COVARIANCE_TYPE:
X = RandomData(rng).X[covar_type]
g = GaussianMixture(n_components=n_components,
covariance_type=covar_type,
random_state=rng)
# Check precisions with bad shapes
g.precisions_init = precisions_bad_shape[covar_type]
assert_raise_message(ValueError,
"The parameter '%s precision' should have "
"the shape of" % covar_type,
g.fit, X)
# Check not positive precisions
g.precisions_init = precisions_not_positive[covar_type]
assert_raise_message(ValueError,
"'%s precision' should be %s"
% (covar_type, not_positive_errors[covar_type]),
g.fit, X)
# Check the correct init of precisions_init
g.precisions_init = rand_data.precisions[covar_type]
g.fit(X)
assert_array_equal(rand_data.precisions[covar_type], g.precisions_init)
def test_suffstat_sk_full():
    # compare the precision matrix computed from the
# EmpiricalCovariance.covariance fitted on X*sqrt(resp)
# with _sufficient_sk_full, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
# special case 1, assuming data is "centered"
X = rng.rand(n_samples, n_features)
resp = rng.rand(n_samples, 1)
X_resp = np.sqrt(resp) * X
nk = np.array([n_samples])
xk = np.zeros((1, n_features))
covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance(assume_centered=True)
ecov.fit(X_resp)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')
precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
assert_array_almost_equal(precs_est, precs_pred)
# special case 2, assuming resp are all ones
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean(axis=0).reshape((1, -1))
covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance(assume_centered=False)
ecov.fit(X)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')
precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
assert_array_almost_equal(precs_est, precs_pred)
def test_suffstat_sk_tied():
# use equation Nk * Sk / N = S_tied
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
covars_pred_full = np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full,
0) / n_samples
covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance()
ecov.covariance_ = covars_pred_full
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, 'tied')
precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T)
precs_est = linalg.inv(covars_pred_tied)
assert_array_almost_equal(precs_est, precs_pred)
def test_suffstat_sk_diag():
# test against 'full' case
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance()
for (cov_full, cov_diag) in zip(covars_pred_full, covars_pred_diag):
ecov.covariance_ = np.diag(np.diag(cov_full))
cov_diag = np.diag(cov_diag)
assert_almost_equal(ecov.error_norm(cov_diag, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(cov_diag, norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, 'diag')
assert_almost_equal(covars_pred_diag, 1. / precs_chol_pred ** 2)
def test_gaussian_suffstat_sk_spherical():
    # computing the spherical covariance equals computing the variance of the
    # one-dimensional data after flattening, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
X = rng.rand(n_samples, n_features)
X = X - X.mean()
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean()
covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X,
nk, xk, 0)
covars_pred_spherical2 = (np.dot(X.flatten().T, X.flatten()) /
(n_features * n_samples))
assert_almost_equal(covars_pred_spherical, covars_pred_spherical2)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical,
'spherical')
assert_almost_equal(covars_pred_spherical, 1. / precs_chol_pred ** 2)
def test_compute_log_det_cholesky():
n_features = 2
rand_data = RandomData(np.random.RandomState(0))
for covar_type in COVARIANCE_TYPE:
covariance = rand_data.covariances[covar_type]
if covar_type == 'full':
predected_det = np.array([linalg.det(cov) for cov in covariance])
elif covar_type == 'tied':
predected_det = linalg.det(covariance)
elif covar_type == 'diag':
predected_det = np.array([np.prod(cov) for cov in covariance])
elif covar_type == 'spherical':
predected_det = covariance ** n_features
# We compute the cholesky decomposition of the covariance matrix
expected_det = _compute_log_det_cholesky(_compute_precision_cholesky(
covariance, covar_type), covar_type, n_features=n_features)
assert_array_almost_equal(expected_det, - .5 * np.log(predected_det))
def _naive_lmvnpdf_diag(X, means, covars):
resp = np.empty((len(X), len(means)))
stds = np.sqrt(covars)
for i, (mean, std) in enumerate(zip(means, stds)):
resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1)
return resp
def test_gaussian_mixture_log_probabilities():
from sklearn.mixture.gaussian_mixture import _estimate_log_gaussian_prob
    # test against _naive_lmvnpdf_diag
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_samples = 500
n_features = rand_data.n_features
n_components = rand_data.n_components
means = rand_data.means
covars_diag = rng.rand(n_components, n_features)
X = rng.rand(n_samples, n_features)
log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag)
# full covariances
precs_full = np.array([np.diag(1. / np.sqrt(x)) for x in covars_diag])
log_prob = _estimate_log_gaussian_prob(X, means, precs_full, 'full')
assert_array_almost_equal(log_prob, log_prob_naive)
# diag covariances
precs_chol_diag = 1. / np.sqrt(covars_diag)
log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, 'diag')
assert_array_almost_equal(log_prob, log_prob_naive)
# tied
covars_tied = np.array([x for x in covars_diag]).mean(axis=0)
precs_tied = np.diag(np.sqrt(1. / covars_tied))
log_prob_naive = _naive_lmvnpdf_diag(X, means,
[covars_tied] * n_components)
log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, 'tied')
assert_array_almost_equal(log_prob, log_prob_naive)
# spherical
covars_spherical = covars_diag.mean(axis=1)
precs_spherical = 1. / np.sqrt(covars_diag.mean(axis=1))
log_prob_naive = _naive_lmvnpdf_diag(X, means,
[[k] * n_features for k in
covars_spherical])
log_prob = _estimate_log_gaussian_prob(X, means,
precs_spherical, 'spherical')
assert_array_almost_equal(log_prob, log_prob_naive)
# skip tests on weighted_log_probabilities, log_weights
def test_gaussian_mixture_estimate_log_prob_resp():
# test whether responsibilities are normalized
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=5)
n_samples = rand_data.n_samples
n_features = rand_data.n_features
n_components = rand_data.n_components
X = rng.rand(n_samples, n_features)
for covar_type in COVARIANCE_TYPE:
weights = rand_data.weights
means = rand_data.means
precisions = rand_data.precisions[covar_type]
g = GaussianMixture(n_components=n_components, random_state=rng,
weights_init=weights, means_init=means,
precisions_init=precisions,
covariance_type=covar_type)
g.fit(X)
resp = g.predict_proba(X)
assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples))
assert_array_equal(g.weights_init, weights)
assert_array_equal(g.means_init, means)
assert_array_equal(g.precisions_init, precisions)
def test_gaussian_mixture_predict_predict_proba():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
Y = rand_data.Y
g = GaussianMixture(n_components=rand_data.n_components,
random_state=rng, weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions[covar_type],
covariance_type=covar_type)
        # Check that an informative error is raised if predict is called before fit
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this method.", g.predict, X)
g.fit(X)
Y_pred = g.predict(X)
Y_pred_proba = g.predict_proba(X).argmax(axis=1)
assert_array_equal(Y_pred, Y_pred_proba)
assert_greater(adjusted_rand_score(Y, Y_pred), .95)
def test_gaussian_mixture_fit():
# recover the ground truth
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_features = rand_data.n_features
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=20,
reg_covar=0, random_state=rng,
covariance_type=covar_type)
g.fit(X)
# needs more data to pass the test with rtol=1e-7
assert_allclose(np.sort(g.weights_), np.sort(rand_data.weights),
rtol=0.1, atol=1e-2)
arg_idx1 = g.means_[:, 0].argsort()
arg_idx2 = rand_data.means[:, 0].argsort()
assert_allclose(g.means_[arg_idx1], rand_data.means[arg_idx2],
rtol=0.1, atol=1e-2)
if covar_type == 'full':
prec_pred = g.precisions_
prec_test = rand_data.precisions['full']
elif covar_type == 'tied':
prec_pred = np.array([g.precisions_] * n_components)
prec_test = np.array([rand_data.precisions['tied']] * n_components)
elif covar_type == 'spherical':
prec_pred = np.array([np.eye(n_features) * c
for c in g.precisions_])
prec_test = np.array([np.eye(n_features) * c for c in
rand_data.precisions['spherical']])
elif covar_type == 'diag':
prec_pred = np.array([np.diag(d) for d in g.precisions_])
prec_test = np.array([np.diag(d) for d in
rand_data.precisions['diag']])
arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort()
arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort()
for k, h in zip(arg_idx1, arg_idx2):
ecov = EmpiricalCovariance()
ecov.covariance_ = prec_test[h]
            # the accuracy depends on the amount of data and the randomness (rng)
assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.1)
def test_gaussian_mixture_fit_best_params():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
n_init = 10
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type)
ll = []
for _ in range(n_init):
g.fit(X)
ll.append(g.score(X))
ll = np.array(ll)
g_best = GaussianMixture(n_components=n_components,
n_init=n_init, reg_covar=0, random_state=rng,
covariance_type=covar_type)
g_best.fit(X)
assert_almost_equal(ll.min(), g_best.score(X))
def test_gaussian_mixture_fit_convergence_warning():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=1)
n_components = rand_data.n_components
max_iter = 1
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=max_iter, reg_covar=0, random_state=rng,
covariance_type=covar_type)
assert_warns_message(ConvergenceWarning,
'Initialization %d did not converged. '
'Try different init parameters, '
'or increase max_iter, tol '
'or check for degenerate data.'
% max_iter, g.fit, X)
def test_multiple_init():
    # Test that multiple inits do not do much worse than a single one
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
for cv_type in COVARIANCE_TYPE:
train1 = GaussianMixture(n_components=n_components,
covariance_type=cv_type,
random_state=rng).fit(X).score(X)
train2 = GaussianMixture(n_components=n_components,
covariance_type=cv_type,
random_state=rng, n_init=5).fit(X).score(X)
assert_greater_equal(train2, train1)
def test_gaussian_mixture_n_parameters():
# Test that the right number of parameters is estimated
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type,
random_state=rng).fit(X)
assert_equal(g._n_parameters(), n_params[cv_type])
def test_bic_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
rng = np.random.RandomState(0)
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
bic_full = GaussianMixture(n_components=n_components,
covariance_type='full',
random_state=rng).fit(X).bic(X)
for covariance_type in ['tied', 'diag', 'spherical']:
bic = GaussianMixture(n_components=n_components,
covariance_type=covariance_type,
random_state=rng).fit(X).bic(X)
assert_almost_equal(bic_full, bic)
def test_gaussian_mixture_aic_bic():
# Test the aic and bic criteria
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 3, 2
X = rng.randn(n_samples, n_features)
# standard gaussian entropy
sgh = 0.5 * (fast_logdet(np.cov(X.T, bias=1)) +
n_features * (1 + np.log(2 * np.pi)))
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type,
random_state=rng, max_iter=200)
g.fit(X)
aic = 2 * n_samples * sgh + 2 * g._n_parameters()
bic = (2 * n_samples * sgh +
np.log(n_samples) * g._n_parameters())
bound = n_features / np.sqrt(n_samples)
assert_true((g.aic(X) - aic) / n_samples < bound)
assert_true((g.bic(X) - bic) / n_samples < bound)
def test_gaussian_mixture_verbose():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type,
verbose=1)
h = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
h.fit(X)
finally:
sys.stdout = old_stdout
def test_warm_start():
random_state = 0
rng = np.random.RandomState(random_state)
n_samples, n_features, n_components = 500, 2, 2
X = rng.rand(n_samples, n_features)
    # Assert that warm_start gives the same result for the same number of iterations
g = GaussianMixture(n_components=n_components, n_init=1, max_iter=2,
reg_covar=0, random_state=random_state,
warm_start=False)
h = GaussianMixture(n_components=n_components, n_init=1, max_iter=1,
reg_covar=0, random_state=random_state,
warm_start=True)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
g.fit(X)
score1 = h.fit(X).score(X)
score2 = h.fit(X).score(X)
assert_almost_equal(g.weights_, h.weights_)
assert_almost_equal(g.means_, h.means_)
assert_almost_equal(g.precisions_, h.precisions_)
assert_greater(score2, score1)
# Assert that by using warm_start we can converge to a good solution
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=5, reg_covar=0, random_state=random_state,
warm_start=False, tol=1e-6)
h = GaussianMixture(n_components=n_components, n_init=1,
max_iter=5, reg_covar=0, random_state=random_state,
warm_start=True, tol=1e-6)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
g.fit(X)
h.fit(X).fit(X)
assert_true(not g.converged_)
assert_true(h.converged_)
def test_score():
covar_type = 'full'
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm1 = GaussianMixture(n_components=n_components, n_init=1,
max_iter=1, reg_covar=0, random_state=rng,
covariance_type=covar_type)
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this method.", gmm1.score, X)
# Check score value
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
gmm1.fit(X)
gmm_score = gmm1.score(X)
gmm_score_proba = gmm1.score_samples(X).mean()
assert_almost_equal(gmm_score, gmm_score_proba)
    # Check that the score increases
gmm2 = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng,
covariance_type=covar_type).fit(X)
assert_greater(gmm2.score(X), gmm1.score(X))
def test_score_samples():
covar_type = 'full'
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type)
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this method.", gmm.score_samples, X)
gmm_score_samples = gmm.fit(X).score_samples(X)
assert_equal(gmm_score_samples.shape[0], rand_data.n_samples)
def test_monotonic_likelihood():
    # We check that each step of the EM without regularization monotonically
    # improves the training set likelihood
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, reg_covar=0,
warm_start=True, max_iter=1, random_state=rng,
tol=1e-7)
current_log_likelihood = -np.infty
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(600):
prev_log_likelihood = current_log_likelihood
try:
current_log_likelihood = gmm.fit(X).score(X)
except ConvergenceWarning:
pass
assert_greater_equal(current_log_likelihood,
prev_log_likelihood)
if gmm.converged_:
break
assert_true(gmm.converged_)
def test_regularisation():
# We train the GaussianMixture on degenerate data by defining two clusters
    # with zero covariance.
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = np.vstack((np.ones((n_samples // 2, n_features)),
np.zeros((n_samples // 2, n_features))))
for covar_type in COVARIANCE_TYPE:
gmm = GaussianMixture(n_components=n_samples, reg_covar=0,
covariance_type=covar_type, random_state=rng)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
assert_raise_message(ValueError,
"Fitting the mixture model failed because "
"some components have ill-defined empirical "
"covariance (for instance caused by "
"singleton or collapsed samples). Try to "
"decrease the number of components, or "
"increase reg_covar.", gmm.fit, X)
gmm.set_params(reg_covar=1e-6).fit(X)
def test_property():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, random_state=rng,
n_init=5)
gmm.fit(X)
if covar_type == 'full':
for prec, covar in zip(gmm.precisions_, gmm.covariances_):
assert_array_almost_equal(linalg.inv(prec), covar)
elif covar_type == 'tied':
assert_array_almost_equal(linalg.inv(gmm.precisions_),
gmm.covariances_)
else:
assert_array_almost_equal(gmm.precisions_, 1. / gmm.covariances_)
def test_sample():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7, n_components=3)
n_features, n_components = rand_data.n_features, rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, random_state=rng)
        # Sampling requires that the GaussianMixture has been fitted
assert_raise_message(NotFittedError, "This GaussianMixture instance "
"is not fitted", gmm.sample, 0)
gmm.fit(X)
assert_raise_message(ValueError, "Invalid value for 'n_samples",
gmm.sample, 0)
# Just to make sure the class samples correctly
n_samples = 20000
X_s, y_s = gmm.sample(n_samples)
for k in range(n_components):
if covar_type == 'full':
assert_array_almost_equal(gmm.covariances_[k],
np.cov(X_s[y_s == k].T), decimal=1)
elif covar_type == 'tied':
assert_array_almost_equal(gmm.covariances_,
np.cov(X_s[y_s == k].T), decimal=1)
elif covar_type == 'diag':
assert_array_almost_equal(gmm.covariances_[k],
np.diag(np.cov(X_s[y_s == k].T)),
decimal=1)
else:
assert_array_almost_equal(
gmm.covariances_[k], np.var(X_s[y_s == k] - gmm.means_[k]),
decimal=1)
means_s = np.array([np.mean(X_s[y_s == k], 0)
for k in range(n_components)])
assert_array_almost_equal(gmm.means_, means_s, decimal=1)
# Check shapes of sampled data, see
# https://github.com/scikit-learn/scikit-learn/issues/7701
assert_equal(X_s.shape, (n_samples, n_features))
for sample_size in range(1, 100):
X_s, _ = gmm.sample(sample_size)
assert_equal(X_s.shape, (sample_size, n_features))
@ignore_warnings(category=ConvergenceWarning)
def test_init():
    # We check that increasing n_init yields a better solution
random_state = 0
rand_data = RandomData(np.random.RandomState(random_state), scale=1)
n_components = rand_data.n_components
X = rand_data.X['full']
gmm1 = GaussianMixture(n_components=n_components, n_init=1,
max_iter=1, random_state=random_state).fit(X)
gmm2 = GaussianMixture(n_components=n_components, n_init=100,
max_iter=1, random_state=random_state).fit(X)
assert_greater(gmm2.lower_bound_, gmm1.lower_bound_)
| bsd-3-clause |
rhyolight/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py | 69 | 104273 | """
Numerical python functions written for compatibility with matlab(TM)
commands with the same names.
Matlab(TM) compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
    Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
    Power spectral density using Welch's average periodogram
:func:`rk4`
    A 4th order Runge-Kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in matlab(TM), but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a matlab function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
record array helper functions
-------------------------------
A collection of helper methods for numpy record arrays
.. _htmlonly::
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions (e.g., rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`conv`
convolution (numpy.convolve)
:meth:`corrcoef`
The matrix of correlation coefficients
:meth:`hist`
Histogram (numpy.histogram)
:meth:`linspace`
Linear spaced array from min to max
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`meshgrid`
    Make a 2D grid from two 1D arrays (numpy.meshgrid)
:meth:`polyfit`
least squares best polynomial fit of x to y (numpy.polyfit)
:meth:`polyval`
evaluate a vector for a vector of polynomial coeffs (numpy.polyval)
:meth:`save`
save ASCII file - use numpy.savetxt
:meth:`trapz`
    trapezoidal integration (trapz(x,y) -> numpy.trapz(y,x))
:meth:`vander`
the Vandermonde matrix (numpy.vander)
"""
from __future__ import division
import csv, warnings, copy, os
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.nxutils as nxutils
import matplotlib.cbook as cbook
# set is a new builtin function in 2.4; delete the following when
# support for 2.3 is dropped.
try:
set
except NameError:
from sets import Set as set
def linspace(*args, **kw):
warnings.warn("use numpy.linspace", DeprecationWarning)
return np.linspace(*args, **kw)
def meshgrid(x,y):
warnings.warn("use numpy.meshgrid", DeprecationWarning)
return np.meshgrid(x,y)
def mean(x, dim=None):
warnings.warn("Use numpy.mean(x) or x.mean()", DeprecationWarning)
if len(x)==0: return None
return np.mean(x, axis=dim)
def logspace(xmin,xmax,N):
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
"return sqrt(x dot x)"
return np.sqrt(np.dot(x,x))
def window_hanning(x):
"return x times the hanning window of len(x)"
return np.hanning(len(x))*x
def window_none(x):
"No window function; simply return x"
return x
#from numpy import convolve as conv
def conv(x, y, mode=2):
'convolve x with y'
warnings.warn("Use numpy.convolve(x, y, mode='full')", DeprecationWarning)
return np.convolve(x,y,mode)
def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
return detrend_linear(x)
def demean(x, axis=0):
"Return x minus its mean along the specified axis"
x = np.asarray(x)
if axis:
ind = [slice(None)] * axis
ind.append(np.newaxis)
return x - x.mean(axis)[ind]
return x - x.mean(axis)
def detrend_mean(x):
"Return x minus the mean(x)"
return x - x.mean()
def detrend_none(x):
"Return x: no detrending"
return x
def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None):
#The checks for if y is x are so that we can use the same function to
#implement the core of psd(), csd(), and spectrogram() without doing
#extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
#Make sure we're dealing with a numpy array. If y and x were the same
#object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x)<NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y)<NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if scale_by_freq is None:
warnings.warn("psd, csd, and specgram have changed to scale their "
"densities by the sampling frequency for better MatLab "
"compatibility. You can pass scale_by_freq=False to disable "
"this behavior. Also, one-sided densities are scaled by a "
"factor of 2.")
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
numFreqs = pad_to
scaling_factor = 1.
elif sides in ('default', 'onesided'):
numFreqs = pad_to//2 + 1
scaling_factor = 2.
else:
raise ValueError("sides must be one of: 'default', 'onesided', or "
"'twosided'")
# Matlab divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
scaling_factor /= Fs
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
Pxy = np.zeros((numFreqs,n), np.complex_)
# do the ffts of the slices
for i in range(n):
thisX = x[ind[i]:ind[i]+NFFT]
thisX = windowVals * detrend(thisX)
fx = np.fft.fft(thisX, n=pad_to)
if same_data:
fy = fx
else:
thisY = y[ind[i]:ind[i]+NFFT]
thisY = windowVals * detrend(thisY)
fy = np.fft.fft(thisY, n=pad_to)
Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2. Also include
# scaling factors for one-sided densities and dividing by the sampling
# frequency, if desired.
Pxy *= scaling_factor / (np.abs(windowVals)**2).sum()
t = 1./Fs * (ind + NFFT / 2.)
freqs = float(Fs) / pad_to * np.arange(numFreqs)
return Pxy, freqs, t
#Split out these keyword docs so that they can be used elsewhere
kwdocd = dict()
kwdocd['PSD'] ="""
Keyword arguments:
*NFFT*: integer
The number of data points used in each block for the FFT.
          Must be even; a power of 2 is most efficient. The default value is 256.
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*detrend*: callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
matlab, where the *detrend* parameter is a vector, in
          matplotlib it is a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the psd (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
          equal to *NFFT*.
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the PSD to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided PSD,
while 'twosided' forces two-sided.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
          by the sampling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MatLab compatibility.
"""
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The power spectral density by Welch's average periodogram method.
The vector *x* is divided into *NFFT* length blocks. Each block
is detrended by the function *detrend* and windowed by the function
*window*. *noverlap* gives the length of the overlap between blocks.
The absolute(fft(block))**2 of each segment are averaged to compute
*Pxx*, with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxx*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
return Pxx.real,freqs
psd.__doc__ = psd.__doc__ % kwdocd
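# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal example of calling psd() on a noisy sinusoid. The helper name
# _example_psd, the 100 Hz test signal and the parameter values are
# assumptions chosen for demonstration; the function is defined but never
# called at import time.
def _example_psd():
    Fs = 1000.0                               # assumed sampling frequency, Hz
    t = np.arange(0.0, 1.0, 1.0/Fs)
    x = np.sin(2*np.pi*100.0*t) + 0.5*np.random.randn(len(t))
    Pxx, freqs = psd(x, NFFT=256, Fs=Fs, noverlap=128)
    # the strongest spectral peak should land near 100 Hz
    return freqs[np.argmax(Pxx)]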
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The cross power spectral density by Welch's average periodogram
method. The vectors *x* and *y* are divided into *NFFT* length
blocks. Each block is detrended by the function *detrend* and
windowed by the function *window*. *noverlap* gives the length
of the overlap between blocks. The product of the direct FFTs
of *x* and *y* are averaged over each segment to compute *Pxy*,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxy*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
Pxy = Pxy.mean(axis=1)
return Pxy, freqs
csd.__doc__ = csd.__doc__ % kwdocd
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
"""
Compute a spectrogram of data in *x*. Data are split into *NFFT*
    length segments and the PSD of each section is computed. The
windowing function *window* is applied to each segment, and the
amount of overlap of each segment is specified with *noverlap*.
If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencies is returned. If *x* is complex then the complete
spectrum is returned.
%(PSD)s
Returns a tuple (*Pxx*, *freqs*, *t*):
- *Pxx*: 2-D array, columns are the periodograms of
successive segments
- *freqs*: 1-D array of frequencies corresponding to the rows
in Pxx
- *t*: 1-D array of times corresponding to midpoints of
segments.
.. seealso::
:func:`psd`:
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
"""
assert(NFFT > noverlap)
Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
Pxx = Pxx.real #Needed since helper implements generically
if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
# center the frequency range at zero
        freqs = np.concatenate((freqs[NFFT//2:]-Fs, freqs[:NFFT//2]))
        Pxx = np.concatenate((Pxx[NFFT//2:,:], Pxx[:NFFT//2,:]), 0)
return Pxx, freqs, t
specgram.__doc__ = specgram.__doc__ % kwdocd
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*
Array or sequence containing the data
%(PSD)s
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect, since
the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`:
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x)<2*NFFT:
raise ValueError(_coh_error)
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
return Cxy, f
cohere.__doc__ = cohere.__doc__ % kwdocd
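# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal example of cohere() on two signals sharing a common 50 Hz
# component; the helper name _example_cohere and the signal parameters are
# assumptions for demonstration only.
def _example_cohere():
    Fs = 500.0
    t = np.arange(0.0, 4.0, 1.0/Fs)           # 2000 samples >= 2*NFFT
    common = np.sin(2*np.pi*50.0*t)
    x = common + np.random.randn(len(t))
    y = common + np.random.randn(len(t))
    Cxy, f = cohere(x, y, NFFT=256, Fs=Fs, noverlap=128)
    # coherence should be largest near the shared 50 Hz component
    return f[np.argmax(Cxy)]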
def corrcoef(*args):
"""
corrcoef(*X*) where *X* is a matrix returns a matrix of correlation
coefficients for the columns of *X*
corrcoef(*x*, *y*) where *x* and *y* are vectors returns the matrix of
correlation coefficients for *x* and *y*.
Numpy arrays can be real or complex.
The correlation matrix is defined from the covariance matrix *C*
as
.. math::
r_{ij} = \\frac{C_{ij}}{\\sqrt{C_{ii}C_{jj}}}
"""
warnings.warn("Use numpy.corrcoef", DeprecationWarning)
kw = dict(rowvar=False)
return np.corrcoef(*args, **kw)
def polyfit(*args, **kwargs):
u"""
polyfit(*x*, *y*, *N*)
Do a best fit polynomial of order *N* of *y* to *x*. Return value
is a vector of polynomial coefficients [pk ... p1 p0]. Eg, for
*N*=2::
p2*x0^2 + p1*x0 + p0 = y1
p2*x1^2 + p1*x1 + p0 = y1
p2*x2^2 + p1*x2 + p0 = y2
.....
p2*xk^2 + p1*xk + p0 = yk
Method: if *X* is a the Vandermonde Matrix computed from *x* (see
`vandermonds
<http://mathworld.wolfram.com/VandermondeMatrix.html>`_), then the
polynomial least squares solution is given by the '*p*' in
X*p = y
where *X* is a (len(*x*) \N{MULTIPLICATION SIGN} *N* + 1) matrix,
*p* is a *N*+1 length vector, and *y* is a (len(*x*)
\N{MULTIPLICATION SIGN} 1) vector.
This equation can be solved as
.. math::
p = (X_t X)^-1 X_t y
where :math:`X_t` is the transpose of *X* and -1 denotes the
inverse. Numerically, however, this is not a good method, so we
use :func:`numpy.linalg.lstsq`.
For more info, see `least squares fitting
<http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html>`_,
but note that the *k*'s and *n*'s in the superscripts and
    subscripts on that page differ from the notation used here. The
    linear algebra is correct, however.
.. seealso::
:func:`polyval`
"""
warnings.warn("use numpy.poyfit", DeprecationWarning)
return np.polyfit(*args, **kwargs)
def polyval(*args, **kwargs):
"""
*y* = polyval(*p*, *x*)
*p* is a vector of polynomial coeffients and *y* is the polynomial
evaluated at *x*.
Example code to remove a polynomial (quadratic) trend from y::
p = polyfit(x, y, 2)
trend = polyval(p, x)
resid = y - trend
.. seealso::
:func:`polyfit`
"""
warnings.warn("use numpy.polyval", DeprecationWarning)
return np.polyval(*args, **kwargs)
def vander(*args, **kwargs):
"""
*X* = vander(*x*, *N* = *None*)
The Vandermonde matrix of vector *x*. The *i*-th column of *X* is the
the *i*-th power of *x*. *N* is the maximum power to compute; if *N* is
*None* it defaults to len(*x*).
"""
warnings.warn("Use numpy.vander()", DeprecationWarning)
return np.vander(*args, **kwargs)
def donothing_callback(*args):
pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
u"""
Cxy, Phase, freqs = cohere_pairs(X, ij, ...)
Compute the coherence for all pairs in *ij*. *X* is a
(*numSamples*, *numCols*) numpy array. *ij* is a list of tuples
(*i*, *j*). Each tuple is a pair of indexes into the columns of *X*
for which you want to compute coherence. For example, if *X* has 64
columns, and you want to compute all nonredundant pairs, define *ij*
as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i, j) )
The other function arguments, except for *preferSpeedOverMemory*
(see below), are explained in the help string of :func:`psd`.
Return value is a tuple (*Cxy*, *Phase*, *freqs*).
- *Cxy*: a dictionary of (*i*, *j*) tuples -> coherence vector for that
pair. I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``. Number of
dictionary keys is ``len(ij)``.
- *Phase*: a dictionary of phases of the cross spectral density at
each frequency for each pair. The keys are ``(i,j)``.
- *freqs*: a vector of frequencies, equal in length to either
      the coherence or phase vectors for any (*i*, *j*) key. E.g.,
to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If *N* is the
number of pairs, this function is O(N) for most of the heavy
lifting, whereas calling cohere for each pair is
O(N\N{SUPERSCRIPT TWO}). However, because of the caching, it is
also more memory intensive, making 2 additional complex arrays
with approximately the same number of elements as *X*.
The parameter *preferSpeedOverMemory*, if *False*, limits the
caching by only making one, rather than two, complex cache arrays.
This is useful if memory becomes critical. Even when
*preferSpeedOverMemory* is *False*, :func:`cohere_pairs` will
    still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
    memory than if *preferSpeedOverMemory* is *True*. In my tests
    with a (43000, 64) array over all non-redundant pairs,
    *preferSpeedOverMemory* = *True* delivered a 33% performance boost
    on a 1.7GHz Athlon with 512MB RAM compared with
    *preferSpeedOverMemory* = *False*. But both solutions were more
    than 10x faster than naively crunching all possible pairs through
cohere.
.. seealso::
:file:`test/cohere_pairs_test.py` in the src tree:
For an example script that shows that this
:func:`cohere_pairs` and :func:`cohere` give the same
results for a given pair.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros( (NFFT, numCols), X.dtype)
X[:numRows,:] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
seen = {}
for i,j in ij:
seen[i]=1; seen[j] = 1
allColumns = seen.keys()
Ncols = len(allColumns)
del seen
# for real X, ignore the negative frequencies
if np.iscomplexobj(X): numFreqs = NFFT
else: numFreqs = NFFT//2+1
    # cache the FFT of every windowed, detrended NFFT length segment
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
        windowVals = window(np.ones((NFFT,), X.dtype))
ind = range(0, numRows-NFFT+1, NFFT-noverlap)
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
    normVal = np.linalg.norm(windowVals)**2
    for i, iCol in enumerate(allColumns):
        progressCallback(i/Ncols, 'Caching FFTs')
Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
for iSlice in slices:
thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
thisSlice = windowVals*detrend(thisSlice)
            Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]
FFTSlices[iCol] = Slices
if preferSpeedOverMemory:
            FFTConjSlices[iCol] = np.conjugate(Slices)
        Pxx[iCol] = np.divide(np.mean(np.absolute(Slices)**2, axis=0), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i,j in ij:
count +=1
if count%10==0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
        if numSlices>1: Pxy = np.mean(Pxy, axis=0)
Pxy = np.divide(Pxy, normVal)
Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Phase[(i,j)] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
def entropy(y, bins):
r"""
Return the entropy of the data in *y*.
.. math::
        -\sum p_i \log_2(p_i)
where :math:`p_i` is the probability of observing *y* in the
:math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
range of bins; see :func:`numpy.histogram`.
Compare *S* with analytic calculation for a Gaussian::
x = mu + sigma * randn(200000)
Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
"""
n,bins = np.histogram(y, bins)
n = n.astype(np.float_)
n = np.take(n, np.nonzero(n)[0]) # get the positive
p = np.divide(n, len(y))
delta = bins[1]-bins[0]
    S = -1.0*np.sum(p*np.log(p)) + np.log(delta)
#S = -1.0*np.sum(p*log(p))
return S
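# --- Illustrative usage sketch (not part of the original module) ------------
# Compares entropy() against the analytic differential entropy of a Gaussian,
# as suggested in the docstring above. The helper name _example_entropy and
# the chosen mu/sigma are assumptions for demonstration only.
def _example_entropy():
    mu, sigma = 0.0, 2.0
    y = mu + sigma*np.random.randn(200000)
    S = entropy(y, 1000)
    S_analytic = 0.5*(1.0 + np.log(2*np.pi*sigma**2.0))
    return S, S_analytic                      # the two values should be close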
def hist(y, bins=10, normed=0):
"""
Return the histogram of *y* with *bins* equally sized bins. If
bins is an array, use those bins. Return value is (*n*, *x*)
where *n* is the count for each bin in *x*.
If *normed* is *False*, return the counts in the first element of
the returned tuple. If *normed* is *True*, return the probability
    density :math:`\\frac{n}{len(y)\\,\\mathrm{dbin}}`.
If *y* has rank > 1, it will be raveled. If *y* is masked, only the
unmasked values will be used.
Credits: the Numeric 22 documentation
"""
warnings.warn("Use numpy.histogram()", DeprecationWarning)
return np.histogram(y, bins=bins, range=None, normed=normed)
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def levypdf(x, gamma, alpha):
"Returm the levy pdf evaluated at *x* for params *gamma*, *alpha*"
N = len(x)
if N%2 != 0:
        raise ValueError, 'x must be an even length array; try\n' + \
              'x = np.linspace(minx, maxx, N), where N is even'
    dx = x[1]-x[0]
    f = 1/(N*dx)*np.arange(-N/2, N/2, dtype=np.float_)
    ind = np.concatenate([np.arange(N/2, N, dtype=int),
                          np.arange(0, N/2, dtype=int)])
    df = f[1]-f[0]
    cfl = np.exp(-gamma*np.absolute(2*np.pi*f)**alpha)
px = np.fft.fft(np.take(cfl,ind)*df).astype(np.float_)
return np.take(px, ind)
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def trapz(x, y):
"""
Trapezoidal integral of *y*(*x*).
"""
warnings.warn("Use numpy.trapz(y,x) instead of trapz(x,y)", DeprecationWarning)
return np.trapz(y, x)
#if len(x)!=len(y):
# raise ValueError, 'x and y must have the same length'
#if len(x)<2:
# raise ValueError, 'x and y must have > 1 element'
#return np.sum(0.5*np.diff(x)*(y[1:]+y[:-1]))
def longest_contiguous_ones(x):
"""
Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first.
"""
x = np.ravel(x)
if len(x)==0:
return np.array([])
ind = (x==0).nonzero()[0]
if len(ind)==0:
return np.arange(len(x))
if len(ind)==len(x):
return np.array([])
y = np.zeros( (len(x)+2,), x.dtype)
y[1:-1] = x
dif = np.diff(y)
up = (dif == 1).nonzero()[0];
dn = (dif == -1).nonzero()[0];
i = (dn-up == max(dn - up)).nonzero()[0][0]
ind = np.arange(up[i], dn[i])
return ind
def longest_ones(x):
'''alias for longest_contiguous_ones'''
return longest_contiguous_ones(x)
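# --- Illustrative usage sketch (not part of the original module) ------------
# A tiny worked example of longest_contiguous_ones(); the helper name and the
# sample vector are assumptions for demonstration only.
def _example_longest_ones():
    x = np.array([0, 1, 1, 0, 1, 1, 1, 0])
    return longest_contiguous_ones(x)         # -> array([4, 5, 6])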
def prepca(P, frac=0):
"""
Compute the principal components of *P*. *P* is a (*numVars*,
*numObs*) array. *frac* is the minimum fraction of variance that a
component must contain to be included.
Return value is a tuple of the form (*Pcomponents*, *Trans*,
*fracVar*) where:
- *Pcomponents* : a (numVars, numObs) array
- *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* *
*P*
- *fracVar* : the fraction of the variance accounted for by each
component returned
A similar function of the same name was in the Matlab (TM)
R13 Neural Network Toolbox but is not found in later versions;
its successor seems to be called "processpcs".
"""
U,s,v = np.linalg.svd(P)
varEach = s**2/P.shape[1]
totVar = varEach.sum()
fracVar = varEach/totVar
ind = slice((fracVar>=frac).sum())
# select the components that are greater
Trans = U[:,ind].transpose()
# The transformed data
Pcomponents = np.dot(Trans,P)
return Pcomponents, Trans, fracVar[ind]
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
"""
Return the percentiles of *x*. *p* can either be a sequence of
percentile values or a scalar. If *p* is a sequence, the ith
element of the return sequence is the *p*(i)-th percentile of *x*.
If *p* is a scalar, the largest value of *x* less than or equal to
the *p* percentage point in the sequence is returned.
"""
x = np.array(x).ravel() # we need a copy
x.sort()
Nx = len(x)
if not cbook.iterable(p):
return x[int(p*Nx/100.0)]
p = np.asarray(p)* Nx/100.0
ind = p.astype(int)
ind = np.where(ind>=Nx, Nx-1, ind)
return x.take(ind)
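# --- Illustrative usage sketch (not part of the original module) ------------
# Quartiles of a random sample via prctile(); the helper name and the sample
# size are assumptions for demonstration only.
def _example_prctile():
    data = np.random.randn(1000)
    return prctile(data, p=(25.0, 50.0, 75.0))   # roughly (-0.67, 0.0, 0.67)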
def prctile_rank(x, p):
"""
Return the rank for each element in *x*, return the rank
0..len(*p*). Eg if *p* = (25, 50, 75), the return value will be a
len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max()<=1 or p.min()<0 or p.max()>100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
return np.searchsorted(ptiles, x)
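# --- Illustrative usage sketch (not part of the original module) ------------
# Ranking values against quartile cutoffs with prctile_rank(); the helper name
# and the input are assumptions for demonstration only.
def _example_prctile_rank():
    x = np.arange(100.0)
    return prctile_rank(x, (25, 50, 75))      # ranks take values in {0, 1, 2, 3}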
def center_matrix(M, dim=0):
"""
Return the matrix *M* with each row having zero mean and unit std.
If *dim* = 1 operate on columns instead of rows. (*dim* is
opposite to the numpy axis kwarg.)
"""
M = np.asarray(M, np.float_)
if dim:
M = (M - M.mean(axis=0)) / M.std(axis=0)
else:
M = (M - M.mean(axis=1)[:,np.newaxis])
M = M / M.std(axis=1)[:,np.newaxis]
return M
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try: Ny = len(y0)
except TypeError:
yout = np.zeros( (len(t),), np.float_)
else:
yout = np.zeros( (len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
mux=0.0, muy=0.0, sigmaxy=0.0):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom
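# --- Illustrative usage sketch (not part of the original module) ------------
# Evaluating bivariate_normal() on a small grid; the helper name and the grid
# parameters are assumptions for demonstration only.
def _example_bivariate_normal():
    X, Y = np.meshgrid(np.linspace(-3.0, 3.0, 7), np.linspace(-3.0, 3.0, 7))
    Z = bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0)
    return Z                                  # largest value at the (0, 0) grid point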
def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X,Y = np.indices(Z.shape)
return X[Cond], Y[Cond], Z[Cond]
def get_sparse_matrix(M,N,frac=0.1):
"""
Return a *M* x *N* sparse matrix with *frac* elements randomly
filled.
"""
data = np.zeros((M,N))*0.
for i in range(int(M*N*frac)):
x = np.random.randint(0,M-1)
y = np.random.randint(0,N-1)
data[x,y] = np.random.rand()
return data
def dist(x,y):
"""
Return the distance between two points.
"""
d = x-y
return np.sqrt(np.dot(d,d))
def dist_point_to_segment(p, s0, s1):
"""
Get the distance of a point to a segment.
*p*, *s0*, *s1* are *xy* sequences
This algorithm from
http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
"""
p = np.asarray(p, np.float_)
s0 = np.asarray(s0, np.float_)
s1 = np.asarray(s1, np.float_)
v = s1 - s0
w = p - s0
c1 = np.dot(w,v);
if ( c1 <= 0 ):
return dist(p, s0);
c2 = np.dot(v,v)
if ( c2 <= c1 ):
return dist(p, s1);
b = c1 / c2
pb = s0 + b * v;
return dist(p, pb)
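# --- Illustrative usage sketch (not part of the original module) ------------
# A worked example of dist_point_to_segment(); the helper name and the chosen
# point/segment are assumptions for demonstration only.
def _example_dist_point_to_segment():
    # the point (0, 1) sits a unit distance above the segment (-1,0)-(1,0)
    return dist_point_to_segment((0.0, 1.0), (-1.0, 0.0), (1.0, 0.0))  # -> 1.0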
def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
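# --- Illustrative usage sketch (not part of the original module) ------------
# A worked example of segments_intersect(); the helper name and the two
# segments are assumptions for demonstration only.
def _example_segments_intersect():
    s1 = ((0.0, 0.0), (1.0, 1.0))
    s2 = ((0.0, 1.0), (1.0, 0.0))
    return segments_intersect(s1, s2)         # -> True; they cross at (0.5, 0.5)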
def fftsurr(x, detrend=detrend_none, window=window_none):
"""
Compute an FFT phase randomized surrogate of *x*.
"""
if cbook.iterable(window):
x=window*detrend(x)
else:
x = window(detrend(x))
z = np.fft.fft(x)
a = 2.*np.pi*1j
phase = a * np.random.rand(len(x))
z = z*np.exp(phase)
return np.fft.ifft(z).real
def liaupunov(x, fprime):
"""
*x* is a very long trajectory from a map, and *fprime* returns the
derivative of *x*.
Returns :
.. math::
        \lambda = \\frac{1}{n}\\sum \\ln|f'(x_i)|
.. seealso::
Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos".
`Wikipedia article on Lyapunov Exponent
<http://en.wikipedia.org/wiki/Lyapunov_exponent>`_.
.. note::
What the function here calculates may not be what you really want;
*caveat emptor*.
It also seems that this function's name is badly misspelled.
"""
return np.mean(np.log(np.absolute(fprime(x))))
class FIFOBuffer:
"""
A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
using numpy arrays under the hood. It is assumed that you will
call asarrays much less frequently than you add data to the queue
-- otherwise another data structure will be faster.
This can be used to support plots where data is added from a real
time feed and the plot object wants to grab data from the buffer
    and plot it to screen less frequently than the incoming.
If you set the *dataLim* attr to
:class:`~matplotlib.transforms.BBox` (eg
:attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
new data come in.
TODO: add a grow method that will extend nmax
.. note::
mlab seems like the wrong place for this class.
"""
def __init__(self, nmax):
"""
Buffer up to *nmax* points.
"""
self._xa = np.zeros((nmax,), np.float_)
self._ya = np.zeros((nmax,), np.float_)
self._xs = np.zeros((nmax,), np.float_)
self._ys = np.zeros((nmax,), np.float_)
self._ind = 0
self._nmax = nmax
self.dataLim = None
self.callbackd = {}
def register(self, func, N):
"""
Call *func* every time *N* events are passed; *func* signature
is ``func(fifo)``.
"""
self.callbackd.setdefault(N, []).append(func)
def add(self, x, y):
"""
Add scalar *x* and *y* to the queue.
"""
if self.dataLim is not None:
xys = ((x,y),)
self.dataLim.update(xys, -1) #-1 means use the default ignore setting
ind = self._ind % self._nmax
#print 'adding to fifo:', ind, x, y
self._xs[ind] = x
self._ys[ind] = y
for N,funcs in self.callbackd.items():
if (self._ind%N)==0:
for func in funcs:
func(self)
self._ind += 1
def last(self):
"""
Get the last *x*, *y* or *None*. *None* if no data set.
"""
if self._ind==0: return None, None
ind = (self._ind-1) % self._nmax
return self._xs[ind], self._ys[ind]
def asarrays(self):
"""
Return *x* and *y* as arrays; their length will be the len of
data added or *nmax*.
"""
if self._ind<self._nmax:
return self._xs[:self._ind], self._ys[:self._ind]
ind = self._ind % self._nmax
self._xa[:self._nmax-ind] = self._xs[ind:]
self._xa[self._nmax-ind:] = self._xs[:ind]
self._ya[:self._nmax-ind] = self._ys[ind:]
self._ya[self._nmax-ind:] = self._ys[:ind]
return self._xa, self._ya
def update_datalim_to_current(self):
"""
        Update the *dataLim* to reflect the data currently in the fifo.
"""
if self.dataLim is None:
raise ValueError('You must first set the dataLim attr')
x, y = self.asarrays()
self.dataLim.update_numerix(x, y, True)
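# --- Illustrative usage sketch (not part of the original module) ------------
# Shows the rotating behaviour of FIFOBuffer; the helper name and the number
# of points added are assumptions for demonstration only.
def _example_fifobuffer():
    fifo = FIFOBuffer(5)
    for k in range(8):
        fifo.add(float(k), float(k)**2)
    # only the 5 most recently added points are retained
    return fifo.asarrays()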
def movavg(x,n):
"""
Compute the len(*n*) moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
return np.convolve(x, w, mode='valid')
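# --- Illustrative usage sketch (not part of the original module) ------------
# A 3-point moving average with movavg(); the helper name and the window
# length are assumptions for demonstration only.
def _example_movavg():
    x = np.arange(10, dtype=np.float_)
    return movavg(x, 3)                       # -> array([1., 2., ..., 8.])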
def save(fname, X, fmt='%.18e',delimiter=' '):
"""
Save the data in *X* to file *fname* using *fmt* string to convert the
data to strings.
*fname* can be a filename or a file handle. If the filename ends
in '.gz', the file is automatically saved in compressed gzip
format. The :func:`load` function understands gzipped files
transparently.
Example usage::
save('test.out', X) # X is an array
save('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays
save('test2.out', x) # x is 1D
save('test3.out', x, fmt='%1.4e') # use exponential notation
*delimiter* is used to separate the fields, eg. *delimiter* ','
for comma-separated values.
"""
if cbook.is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname,'wb')
else:
fh = file(fname,'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
origShape = None
if X.ndim == 1:
origShape = X.shape
X.shape = len(X), 1
for row in X:
fh.write(delimiter.join([fmt%val for val in row]) + '\n')
if origShape is not None:
X.shape = origShape
def load(fname,comments='#',delimiter=None, converters=None,skiprows=0,
usecols=None, unpack=False, dtype=np.float_):
"""
Load ASCII data from *fname* into an array and return the array.
The data must be regular, same number of values in every row
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'.
matfile data is not supported; for that, use :mod:`scipy.io.mio`
module.
Example usage::
X = load('test.dat') # data in two columns
t = X[:,0]
y = X[:,1]
Alternatively, you can do the same with "unpack"; see below::
X = load('test.dat') # a matrix of data
x = load('test.dat') # a single column of data
- *comments*: the character used to indicate the start of a comment
in the file
    - *delimiter* is a string-like character used to separate values
in the file. If *delimiter* is unspecified or *None*, any
whitespace string is a separator.
- *converters*, if not *None*, is a dictionary mapping column number to
a function that will convert that column to a float (or the optional
*dtype* if specified). Eg, if column 0 is a date string::
converters = {0:datestr2num}
- *skiprows* is the number of rows from the top to skip.
- *usecols*, if not *None*, is a sequence of integer column indexes to
extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
just the 2nd, 5th and 6th columns
- *unpack*, if *True*, will transpose the matrix allowing you to unpack
into named arguments on the left hand side::
t,y = load('test.dat', unpack=True) # for two column data
x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)
- *dtype*: the array will have this dtype. default: ``numpy.float_``
.. seealso::
See :file:`examples/pylab_examples/load_converter.py` in the source tree:
Exercises many of these options.
"""
if converters is None: converters = {}
fh = cbook.to_filehandle(fname)
X = []
if delimiter==' ':
# space splitting is a special case since x.split() is what
# you want, not x.split(' ')
def splitfunc(x):
return x.split()
else:
def splitfunc(x):
return x.split(delimiter)
converterseq = None
for i,line in enumerate(fh):
if i<skiprows: continue
line = line.split(comments, 1)[0].strip()
if not len(line): continue
if converterseq is None:
converterseq = [converters.get(j,float)
for j,val in enumerate(splitfunc(line))]
if usecols is not None:
vals = splitfunc(line)
row = [converterseq[j](vals[j]) for j in usecols]
else:
row = [converterseq[j](val)
for j,val in enumerate(splitfunc(line))]
thisLen = len(row)
X.append(row)
X = np.array(X, dtype)
r,c = X.shape
if r==1 or c==1:
X.shape = max(r,c),
if unpack: return X.transpose()
else: return X
def slopes(x,y):
"""
    SLOPES calculates the slope y'(x) of a curve y(x): given data
    vectors *x* and *y*, the slope at each point is estimated using the
    slope of a parabola through any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between x-
and y-values. For many functions, however, the abscissa are given
in different dimensions, so an aspect ratio is completely
arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
    cases.
    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by an original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
STINEMAN_INTERP Well behaved data interpolation. Given data
vectors X and Y, the slope vector YP and a new abscissa vector XI
the function stineman_interp(xi,x,y,yp) uses Stineman
interpolation to calculate a vector YI corresponding to XI.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa:
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
    they were not an academic journal, once in a while something
    serious and original comes in, adding that this was
    "apparently a real solution" to a well known problem.
For yp=None, the routine automatically determines the slopes using
the "slopes" routine.
X is assumed to be sorted in increasing order
    For values xi[j] < x[0] or xi[j] > x[-1], the routine tries an
    extrapolation. The relevance of the data obtained from this is, of
    course, questionable...
    original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
    completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
def inside_poly(points, verts):
"""
points is a sequence of x,y points
    verts is a sequence of x,y vertices of a polygon
return value is a sequence of indices into points for the points
that are inside the polygon
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(ymin, xs, ys):
"""
    given arrays *xs* and *ys*, return the vertices of a polygon
that has a scalar lower bound *ymin* and an upper bound at the *ys*.
intended for use with Axes.fill, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
    return poly_between(xs, ymin, ys)
def poly_between(x, ylower, yupper):
"""
given a sequence of x, ylower and yupper, return the polygon that
fills the regions between them. ylower or yupper can be scalar or
iterable. If they are iterable, they must be equal in length to x
return value is x, y arrays for use with Axes.fill
"""
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*np.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*np.ones(Nx)
x = np.concatenate( (x, x[::-1]) )
y = np.concatenate( (yupper, ylower[::-1]) )
return x,y
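# --- Illustrative usage sketch (not part of the original module) ------------
# Building a fill polygon between y=0 and a sine curve with poly_between();
# the helper name is an assumption for demonstration only.
def _example_poly_between():
    x = np.linspace(0.0, 2*np.pi, 50)
    xv, yv = poly_between(x, 0.0, np.sin(x))
    return xv, yv                             # suitable for Axes.fill(xv, yv)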
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <[email protected]>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
        return np.exp(np.clip(x,exp_safe_MIN,exp_safe_MAX))
else:
return math.exp(x)
def amap(fn,*args):
"""
amap(function, sequence[, sequence, ...]) -> array.
Works like :func:`map`, but it returns an array. This is just a
convenient shorthand for ``numpy.array(map(...))``.
"""
return np.array(map(fn,*args))
#from numpy import zeros_like
def zeros_like(a):
"""
Return an array of zeros of the shape and typecode of *a*.
"""
warnings.warn("Use numpy.zeros_like(a)", DeprecationWarning)
return np.zeros_like(a)
#from numpy import sum as sum_flat
def sum_flat(a):
"""
Return the sum of all the elements of *a*, flattened out.
It uses ``a.flat``, and if *a* is not contiguous, a call to
``ravel(a)`` is made.
"""
warnings.warn("Use numpy.sum(a) or a.sum()", DeprecationWarning)
return np.sum(a)
#from numpy import mean as mean_flat
def mean_flat(a):
"""
Return the mean of all the elements of *a*, flattened out.
"""
warnings.warn("Use numpy.mean(a) or a.mean()", DeprecationWarning)
return np.mean(a)
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return np.sqrt(np.mean(np.absolute(a)**2))
def l1norm(a):
"""
Return the *l1* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sum(np.absolute(a))
def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sqrt(np.sum(np.absolute(a)**2))
def norm_flat(a,p=2):
"""
norm(a,p=2) -> l-p norm of a.flat
Return the l-p norm of *a*, considered as a flat array. This is NOT a true
matrix norm, since arrays of arbitrary rank are always flattened.
*p* can be a number or the string 'Infinity' to get the L-infinity norm.
"""
# This function was being masked by a more general norm later in
# the file. We may want to simply delete it.
if p=='Infinity':
return np.amax(np.absolute(a))
else:
return (np.sum(np.absolute(a)**p))**(1.0/p)
def frange(xini,xfin=None,delta=None,**kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
setting the keyword *closed* = 0, in this case, :func:`frange`
    basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
      array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
#defaults
kw.setdefault('closed',1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
    if xfin is None:
xfin = xini + 0.0
xini = 0.0
    if delta is None:
delta = 1.0
# compute # of points, spacing and return final list
try:
npts=kw['npts']
delta=(xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
#npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
return np.arange(npts)*delta+xini
# end frange()
#import numpy.diag as diagonal_matrix
def diagonal_matrix(diag):
"""
Return square diagonal matrix whose non-zero elements are given by the
input array.
"""
warnings.warn("Use numpy.diag(d)", DeprecationWarning)
return np.diag(diag)
def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
warnings.warn("Use dtype kwarg instead of typecode",
DeprecationWarning)
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
return iden
def base_repr (number, base = 2, padding = 0):
"""
Return the representation of a *number* in any given *base*.
"""
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if number < base:
        return (padding - 1) * chars [0] + chars [int (number)]
max_exponent = int (math.log (number)/math.log (base))
max_power = long (base) ** max_exponent
lead_digit = int (number/max_power)
return chars [lead_digit] + \
base_repr (number - max_power * lead_digit, base, \
max (padding - 1, max_exponent))
def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
#assert number < 2L << max_length
shifts = map (operator.rshift, max_length * [number], \
range (max_length - 1, -1, -1))
digits = map (operator.mod, shifts, max_length * [2])
    if not digits.count (1): return '0'
digits = digits [digits.index (1):]
return ''.join (map (repr, digits)).replace('L','')
def log2(x,ln2 = math.log(2.0)):
"""
Return the log(*x*) in base 2.
This is a _slow_ function but which is guaranteed to return the correct
integer value if the input is an integer exact power of 2.
"""
try:
bin_n = binary_repr(x)[1:]
except (AssertionError,TypeError):
return math.log(x)/ln2
else:
if '1' in bin_n:
return math.log(x)/ln2
else:
return len(bin_n)
def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
return len(bin_n)
def isvector(X):
"""
Like the Matlab (TM) function with the same name, returns *True*
if the supplied numpy array or matrix *X* looks like a vector,
    meaning it has one non-singleton axis (i.e., it can have
multiple axes, but all must have length 1, except for one of
them).
If you just want to see if the array has 1 axis, use X.ndim == 1.
"""
return np.prod(X.shape)==np.max(X.shape)
#from numpy import fromfunction as fromfunction_kw
def fromfunction_kw(function, dimensions, **kwargs):
"""
Drop-in replacement for :func:`numpy.fromfunction`.
Allows passing keyword arguments to the desired function.
Call it as (keywords are optional)::
fromfunction_kw(MyFunction, dimensions, keywords)
The function ``MyFunction`` is responsible for handling the
dictionary of keywords it will receive.
"""
warnings.warn("Use numpy.fromfunction()", DeprecationWarning)
return np.fromfunction(function, dimensions, **kwargs)
### end fperez numutils code
def rem(x,y):
"""
Deprecated - see :func:`numpy.remainder`
"""
raise NotImplementedError('Deprecated - see numpy.remainder')
def norm(x,y=2):
"""
Deprecated - see :func:`numpy.linalg.norm`
"""
raise NotImplementedError('Deprecated - see numpy.linalg.norm')
def orth(A):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def rank(x):
"""
Deprecated - see :func:`numpy.rank`
"""
raise NotImplementedError('Deprecated - see numpy.rank')
def sqrtm(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - see scipy.linalg.sqrtm')
def mfuncC(f, x):
"""
Deprecated
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def approx_real(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isnan(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def safe_isinf(x):
':func:`numpy.isinf` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isinf(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def rec_view(rec):
"""
Return a view of an ndarray as a recarray
.. seealso::
http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html
"""
return rec.view(np.recarray)
#return rec.view(dtype=(np.record, rec.dtype), type=np.recarray)
def rec_append_field(rec, name, arr, dtype=None):
"""
Return a new record array with field name populated with data from
array *arr*. This function is Deprecated. Please use
:func:`rec_append_fields`.
"""
warnings.warn("use rec_append_fields", DeprecationWarning)
return rec_append_fields(rec, name, arr, dtype)
def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names) \
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
raise ValueError, "number of arrays do not match number of names"
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = map(np.asarray, arrs)
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError, "dtypes must be None, a single dtype or a list"
newdtype = np.dtype(rec.dtype.descr + zip(names, dtypes))
newrec = np.empty(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
return rec_view(newrec)
def rec_drop_fields(rec, names):
"""
Return a new numpy record array with fields in *names* dropped.
"""
names = set(names)
Nr = len(rec)
newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
if name not in names])
newrec = np.empty(Nr, dtype=newdtype)
for field in newdtype.names:
newrec[field] = rec[field]
return rec_view(newrec)
def rec_groupby(r, groupby, stats):
"""
*r* is a numpy record array
*groupby* is a sequence of record array attribute names that
together form the grouping key. eg ('date', 'productcode')
*stats* is a sequence of (*attr*, *func*, *outname*) tuples which
will call ``x = func(attr)`` and assign *x* to the record array
output with attribute *outname*. For example::
stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
    The returned record array has *dtype* names for each attribute name in
    the *groupby* argument, with the associated group values, and
for each outname name in the *stats* argument, with the associated
stat summary output.
"""
# build a dictionary from groupby keys-> list of indices into r with
# those keys
rowd = dict()
for i, row in enumerate(r):
key = tuple([row[attr] for attr in groupby])
rowd.setdefault(key, []).append(i)
# sort the output by groupby keys
keys = rowd.keys()
keys.sort()
rows = []
for key in keys:
row = list(key)
# get the indices for this groupby key
ind = rowd[key]
thisr = r[ind]
# call each stat function for this groupby slice
row.extend([func(thisr[attr]) for attr, func, outname in stats])
rows.append(row)
# build the output record array with groupby and outname attributes
attrs, funcs, outnames = zip(*stats)
names = list(groupby)
names.extend(outnames)
return np.rec.fromrecords(rows, names=names)
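# --- Illustrative usage sketch (not part of the original module) ------------
# Grouping a tiny record array with rec_groupby(); the helper name, field
# names and values are assumptions for demonstration only.
def _example_rec_groupby():
    r = np.rec.fromrecords([('a', 1.0), ('a', 3.0), ('b', 2.0)],
                           names='productcode,sales')
    stats = (('sales', len, 'numsales'), ('sales', np.mean, 'avgsale'))
    return rec_groupby(r, ('productcode',), stats)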
def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
return np.rec.fromarrays(arrays, names=names)
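# --- Illustrative usage sketch (not part of the original module) ------------
# Appending a derived column with rec_summarize(); the helper name, field
# names and values are assumptions for demonstration only.
def _example_rec_summarize():
    r = np.rec.fromrecords([(1, 2.0), (2, 8.0)], names='id,value')
    return rec_summarize(r, [('value', np.log, 'logvalue')])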
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
"""
Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
field names -- if *key* is a string it is assumed to be a single
attribute name. If *r1* and *r2* have equal values on all the keys
in the *key* tuple, then their fields will be merged into a new
record array containing the intersection of the fields of *r1* and
*r2*.
*r1* (also *r2*) must not have any duplicate keys.
The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
do a rightouter join just reverse *r1* and *r2*.
The *defaults* keyword is a dictionary filled with
``{column_name:default_value}`` pairs.
The keywords *r1postfix* and *r2postfix* are postfixed to column names
(other than keys) that are both in *r1* and *r2*.
"""
if cbook.is_string_like(key):
key = (key, )
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s'%name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s'%name)
def makekey(row):
return tuple([row[name] for name in key])
r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
r1keys = set(r1d.keys())
r2keys = set(r2d.keys())
common_keys = r1keys & r2keys
r1ind = np.array([r1d[k] for k in common_keys])
r2ind = np.array([r2d[k] for k in common_keys])
common_len = len(common_keys)
left_len = right_len = 0
if jointype == "outer" or jointype == "leftouter":
left_keys = r1keys.difference(r2keys)
left_ind = np.array([r1d[k] for k in left_keys])
left_len = len(left_ind)
if jointype == "outer":
right_keys = r2keys.difference(r1keys)
right_ind = np.array([r2d[k] for k in right_keys])
right_len = len(right_ind)
def key_desc(name):
'if name is a string key, use the larger size of r1 or r2 before merging'
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
dt2 = r2.dtype[name]
assert dt2.type == dt1.type
if dt1.itemsize > dt2.itemsize:
return (name, dt1.descr[0][1])
else:
return (name, dt2.descr[0][1])
keydesc = [key_desc(name) for name in key]
def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names: return name
else: return name + r1postfix
def mapped_r2field(name):
"""
The column name in *newrec* that corresponds to the column in *r2*.
"""
if name in key or name not in r1.dtype.names: return name
else: return name + r2postfix
r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
newdtype = np.dtype(keydesc + r1desc + r2desc)
newrec = np.empty(common_len + left_len + right_len, dtype=newdtype)
if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse
newrec_fields = newrec.dtype.fields.keys()
for k, v in defaults.items():
if k in newrec_fields:
newrec[k] = v
for field in r1.dtype.names:
newfield = mapped_r1field(field)
if common_len:
newrec[newfield][:common_len] = r1[field][r1ind]
if (jointype == "outer" or jointype == "leftouter") and left_len:
newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
for field in r2.dtype.names:
newfield = mapped_r2field(field)
if field not in key and common_len:
newrec[newfield][:common_len] = r2[field][r2ind]
if jointype == "outer" and right_len:
newrec[newfield][-right_len:] = r2[field][right_ind]
newrec.sort(order=key)
return rec_view(newrec)
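# Illustrative usage (kept as a comment; assumes r1 and r2 each have a unique
# 'date' key plus their own data columns):
#
#   joined = rec_join('date', r1, r2, jointype='outer', defaults={'sales': 0.})
#   # columns (other than the key) present in both inputs get the
#   # r1postfix/r2postfix suffixes, e.g. 'price1' and 'price2'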
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
converterd=None, names=None, missing='', missingd=None,
use_mrecords=True):
"""
Load data from comma/space/tab delimited file in *fname* into a
numpy record array and return the record array.
If *names* is *None*, a header row is required to automatically
assign the recarray names. The headers will be lower cased,
spaces will be converted to underscores, and illegal attribute
name characters removed. If *names* is not *None*, it is a
sequence of names to use for the column names. In this case, it
is assumed there is no header row.
- *fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
- *comments*: the character used to indicate the start of a comment
in the file
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
- *converterd*: if not *None*, is a dictionary mapping column number or
munged column name to a converter function.
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
- *missingd* is a dictionary mapping munged column names to field values
which signify that the field does not contain actual data and should
be masked, e.g. '0000-00-00' or 'unused'
- *missing*: a string whose value signals a missing field regardless of
the column it appears in
- *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing
If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
"""
if converterd is None:
converterd = dict()
if missingd is None:
missingd = {}
import dateutil.parser
import datetime
parsedate = dateutil.parser.parse
fh = cbook.to_filehandle(fname)
class FH:
"""
For space-delimited files, we want different behavior than
comma or tab. Generally, we want multiple spaces to be
treated as a single separator, whereas with comma and tab we
want multiple commas to return multiple (empty) fields. The
join/strip trick below effects this.
"""
def __init__(self, fh):
self.fh = fh
def close(self):
self.fh.close()
def seek(self, arg):
self.fh.seek(arg)
def fix(self, s):
return ' '.join(s.split())
def next(self):
return self.fix(self.fh.next())
def __iter__(self):
for line in self.fh:
yield self.fix(line)
if delimiter==' ':
fh = FH(fh)
reader = csv.reader(fh, delimiter=delimiter)
def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i>=(skiprows-1): break
return fh, reader
process_skiprows(reader)
def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
return False
def with_default_value(func, default):
def newfunc(name, val):
if ismissing(name, val):
return default
else:
return func(val)
return newfunc
def mybool(x):
if x=='True': return True
elif x=='False': return False
else: raise ValueError('invalid bool')
dateparser = dateutil.parser.parse
mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
myfloat = with_default_value(float, np.nan)
myint = with_default_value(int, -1)
mystr = with_default_value(str, '')
mybool = with_default_value(mybool, None)
def mydate(x):
# try and return a date object
d = dateparser(x)
if d.hour>0 or d.minute>0 or d.second>0:
raise ValueError('not a date')
return d.date()
mydate = with_default_value(mydate, datetime.date(1,1,1))
def get_func(name, item, func):
# promote functions in this order
funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
try: func(name, item)
except:
if func==mystr:
raise ValueError('Could not find a working conversion function')
else: return get_func(name, item, funcmap[func]) # recurse
else: return func
# map column names that clash with builtins -- TODO - extend this list
itemd = {
'return' : 'return_',
'file' : 'file_',
'print' : 'print_',
}
def get_converters(reader):
converters = None
for i, row in enumerate(reader):
if i==0:
converters = [mybool]*len(row)
if checkrows and i>checkrows:
break
#print i, len(names), len(row)
#print 'converters', zip(converters, row)
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
#if not item.strip(): continue
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
return converters
# Get header and remove invalid characters
needheader = names is None
if needheader:
for row in reader:
#print 'csv2rec', row
if len(row) and row[0].startswith(comments):
continue
headers = row
break
# remove these chars
delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
delete.add('"')
names = []
seen = dict()
for i, item in enumerate(headers):
item = item.strip().lower().replace(' ', '_')
item = ''.join([c for c in item if c not in delete])
if not len(item):
item = 'column%d'%i
item = itemd.get(item, item)
cnt = seen.get(item, 0)
if cnt>0:
names.append(item + '_%d'%cnt)
else:
names.append(item)
seen[item] = cnt+1
else:
if cbook.is_string_like(names):
names = [n.strip() for n in names.split(',')]
# get the converter functions by inspecting checkrows
converters = get_converters(reader)
if converters is None:
raise ValueError('Could not find any valid data in CSV file')
# reset the reader and start over
fh.seek(0)
reader = csv.reader(fh, delimiter=delimiter)
process_skiprows(reader)
if needheader:
skipheader = reader.next()
# iterate over the remaining rows and convert the data to date
# objects, ints, or floats as appropriate
rows = []
rowmasks = []
for i, row in enumerate(reader):
if not len(row): continue
if row[0].startswith(comments): continue
rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
fh.close()
if not len(rows):
return None
if use_mrecords and np.any(rowmasks):
try: from numpy.ma import mrecords
except ImportError:
raise RuntimeError('numpy 1.05 or later is required for masked array support')
else:
r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
else:
r = np.rec.fromrecords(rows, names=names)
return r
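# Illustrative usage (kept as a comment; 'data.csv' is a hypothetical file
# with a header row):
#
#   r = csv2rec('data.csv', delimiter=',', checkrows=10)
#   if r is not None:
#       print r.dtype.names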
# a series of classes for describing the format intentions of various rec views
class FormatObj:
def tostr(self, x):
return self.toval(x)
def toval(self, x):
return str(x)
def fromstr(self, s):
return s
class FormatString(FormatObj):
def tostr(self, x):
val = repr(x)
return val[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def tostr(self, x):
if x is None: return 'None'
return self.fmt%self.toval(x)
class FormatFloat(FormatFormatStr):
def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df'%precision)
self.precision = precision
self.scale = scale
def toval(self, x):
if x is not None:
x = x * self.scale
return x
def fromstr(self, s):
return float(s)/self.scale
class FormatInt(FormatObj):
def tostr(self, x):
return '%d'%int(x)
def toval(self, x):
return int(x)
def fromstr(self, s):
return int(s)
class FormatBool(FormatObj):
def toval(self, x):
return str(x)
def fromstr(self, s):
return bool(s)
class FormatPercent(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def toval(self, x):
if x is None: return 'None'
return x.strftime(self.fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
FormatDate.__init__(self, fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x)
defaultformatd = {
np.bool_ : FormatBool(),
np.int16 : FormatInt(),
np.int32 : FormatInt(),
np.int64 : FormatInt(),
np.float32 : FormatFloat(),
np.float64 : FormatFloat(),
np.object_ : FormatObj(),
np.string_ : FormatString(),
}
def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
return formatd
def csvformat_factory(format):
format = copy.deepcopy(format)
if isinstance(format, FormatFloat):
format.scale = 1. # override scaling for storage
format.fmt = '%r'
return format
def rec2txt(r, header=None, padding=3, precision=3):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item,atype=int):
tdict = {None:int, int:float, float:str}
try: atype(str(item))
except: return get_type(item,tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
length = max(len(colname),column.itemsize)
return 0, length+padding, "%s" # left justify
if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
length = max(len(colname),np.max(map(len,map(str,column))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
fmt = "%." + str(precision) + "f"
length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column))))
return 1, length+padding, fmt # right justify
return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s"
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
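# Illustrative usage (kept as a comment; assumes ``r`` is a record array with
# float columns):
#
#   print rec2txt(r, precision=2)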
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
missingd=None):
"""
Save the data from numpy recarray *r* into a
comma-/space-/tab-delimited file. The record array dtype names
will be used for column headers.
*fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
.. seealso::
:func:`csv2rec`:
For information about *missing* and *missingd*, which can
be used to fill in masked values into your CSV file.
"""
if missingd is None:
missingd = dict()
def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
return newfunc
formatd = get_formatd(r, formatd)
funcs = []
for i, name in enumerate(r.dtype.names):
funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
fh, opened = cbook.to_filehandle(fname, 'w', return_opened=True)
writer = csv.writer(fh, delimiter=delimiter)
header = r.dtype.names
writer.writerow(header)
# Our list of specials for missing values
mvals = []
for name in header:
mvals.append(missingd.get(name, missing))
ismasked = False
if len(r):
row = r[0]
ismasked = hasattr(row, '_fieldmask')
for row in r:
if ismasked:
row, rowmask = row.item(), row._fieldmask.item()
else:
rowmask = [False] * len(row)
writer.writerow([func(val, mask, mval) for func, val, mask, mval
in zip(funcs, row, rowmask, mvals)])
if opened:
fh.close()
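# Illustrative round trip (kept as a comment; 'out.csv' is a hypothetical
# path):
#
#   rec2csv(r, 'out.csv', delimiter=',')
#   r2 = csv2rec('out.csv')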
def griddata(x,y,z,xi,yi):
"""
``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
*f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
vectors (*x*, *y*, *z*). :func:`griddata` interpolates this
surface at the points specified by (*xi*, *yi*) to produce
*zi*. *xi* and *yi* must describe a regular grid, can be either 1D
or 2D, but must be monotonically increasing.
A masked array is returned if any grid points are outside the convex
hull defined by the input data (no extrapolation is done).
Uses natural neighbor interpolation based on Delaunay
triangulation. By default, this algorithm is provided by the
:mod:`matplotlib.delaunay` package, written by Robert Kern. The
triangulation algorithm in this package is known to fail on some
nearly pathological cases. For this reason, a separate toolkit
(:mod:`mpl_toolkits.natgrid`) has been created that provides a more
robust algorithm for triangulation and interpolation. This
toolkit is based on the NCAR natgrid library, which contains code
that is not redistributable under a BSD-compatible license. When
installed, this function will use the :mod:`mpl_toolkits.natgrid`
algorithm, otherwise it will use the built-in
:mod:`matplotlib.delaunay` package.
The natgrid matplotlib toolkit can be downloaded from
http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
"""
try:
from mpl_toolkits.natgrid import _natgrid, __version__
_use_natgrid = True
except ImportError:
import matplotlib.delaunay as delaunay
from matplotlib.delaunay import __version__
_use_natgrid = False
if not griddata._reported:
if _use_natgrid:
verbose.report('using natgrid version %s' % __version__)
else:
verbose.report('using delaunay version %s' % __version__)
griddata._reported = True
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if not len(x)==len(y)==len(z):
raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
if hasattr(z,'mask'):
x = x.compress(z.mask == False)
y = y.compress(z.mask == False)
z = z.compressed()
if _use_natgrid: # use natgrid toolkit if available.
if xi.ndim == 2:
xi = xi[0,:]
yi = yi[:,0]
# override default natgrid internal parameters.
_natgrid.seti('ext',0)
_natgrid.setr('nul',np.nan)
# cast input arrays to doubles (this makes a copy)
x = x.astype(np.float)
y = y.astype(np.float)
z = z.astype(np.float)
xo = xi.astype(np.float)
yo = yi.astype(np.float)
if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0:
raise ValueError('output grid defined by xi,yi must be monotone increasing')
# allocate array for output (buffer will be overwritten by nagridd)
zo = np.empty((yo.shape[0],xo.shape[0]), np.float)
_natgrid.natgridd(x,y,z,xo,yo,zo)
else: # use Robert Kern's delaunay package from scikits (default)
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if xi.ndim == 1:
xi,yi = np.meshgrid(xi,yi)
# triangulate data
tri = delaunay.Triangulation(x,y)
# interpolate data
interp = tri.nn_interpolator(z)
zo = interp(xi,yi)
# mask points on grid outside convex hull of input data.
if np.any(np.isnan(zo)):
zo = np.ma.masked_where(np.isnan(zo),zo)
return zo
griddata._reported = False
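# Illustrative usage (kept as a comment; the scattered sample points are made
# up for the example):
#
#   x = np.random.uniform(-2, 2, 100)
#   y = np.random.uniform(-2, 2, 100)
#   z = x * np.exp(-x**2 - y**2)
#   xi = np.linspace(-2, 2, 50)
#   yi = np.linspace(-2, 2, 50)
#   zi = griddata(x, y, z, xi, yi)   # zi.shape == (50, 50)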
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
:func:`simple_linear_interpolation` will give a list of points
between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi): xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile( np.nan, s )
for ii,xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx<x[0]:
if extrap:
yi[ii] = y[0]
elif xx>x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x<xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
return yi
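# Illustrative usage (kept as a comment):
#
#   x = [0., 1., 2.]
#   y = [0., 10., 20.]
#   less_simple_linear_interpolation(x, y, [0.5, 1.5])   # -> [5., 15.]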
def slopes(x,y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated using the slope obtained from that of a
parabola through any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
Norbert Nemec, Institute of Theoretical Physics, University of
Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
(inspired by an original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
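# Illustrative usage (kept as a comment): estimate the slopes of a sine curve
# and compare with the analytic derivative.
#
#   x = np.linspace(0, 2*np.pi, 20)
#   y = np.sin(x)
#   yp = slopes(x, y)        # approximately np.cos(x)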
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were "not an academic journal but once in a while something
serious and original comes in", adding that this was "apparently a
real solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
Original implementation by Halldor Bjornsson, Icelandic
Meteorological Office, March 2006 halldor at vedur.is
Completely reworked and optimized for Python by Norbert Nemec,
Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
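# Illustrative usage (kept as a comment; assumes the nxutils import used
# elsewhere in this module is available):
#
#   verts = [(0., 0.), (1., 0.), (1., 1.), (0., 1.)]
#   points = [(0.5, 0.5), (2.0, 2.0)]
#   inside_poly(points, verts)   # -> array([0])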
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
nx = ma
else:
nx = np
xs = nx.asarray(xs)
ys = nx.asarray(ys)
Nx = len(xs)
Ny = len(ys)
assert(Nx==Ny)
x = xmin*nx.ones(2*Nx)
y = nx.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
nx = ma
else:
nx = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*nx.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*nx.ones(Nx)
x = nx.concatenate( (x, x[::-1]) )
y = nx.concatenate( (yupper, ylower[::-1]) )
return x,y
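# Illustrative usage (kept as a comment; ``ax`` is a hypothetical Axes):
#
#   x = np.arange(0, 2, 0.01)
#   xv, yv = poly_between(x, 0, np.sin(2*np.pi*x))
#   # ax.fill(xv, yv)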
def is_closed_polygon(X):
"""
Tests whether the first and last objects in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
return boundaries
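# Illustrative usage (kept as a comment):
#
#   contiguous_regions([False, True, True, False, True])   # -> [(1, 3), (4, 5)]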
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P),axis=axis))**(1./P)
def distances_along_curve( X ):
"""
Computes the distance between successive points in *N* dimensions,
where *X* is an *M* x *N* array or matrix. The distances between
successive rows are computed. Distance is the standard Euclidean
distance.
"""
X = np.diff( X, axis=0 )
return vector_lengths(X,axis=1)
def path_length(X):
"""
Computes the distance travelled along a polygonal curve in *N* dimensions,
where *X* is an *M* x *N* array or matrix. Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
return np.concatenate( (np.zeros(1), np.cumsum(X)) )
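# Illustrative usage (kept as a comment): cumulative length along three unit
# steps around a square.
#
#   X = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
#   path_length(X)   # -> array([ 0.,  1.,  2.,  3.])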
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
# c3x, c3y = q2x, q2y
return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
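# Illustrative usage (kept as a comment): elevate the quadratic segment with
# control points (0, 0), (1, 2), (2, 0) to a cubic.
#
#   quad2cubic(0, 0, 1, 2, 2, 0)
#   # -> (0, 0, 0.666..., 1.333..., 1.333..., 1.333..., 2, 0)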
| agpl-3.0 |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/example/kaggle-ndsb1/submission_dsb.py | 52 | 5048 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import pandas as pd
import os
import time as time
## Receives an array with probabilities for each class (columns) X images in test set (as listed in test.lst) and formats in Kaggle submission format, saves and compresses in submission_path
def gen_sub(predictions,test_lst_path="test.lst",submission_path="submission.csv"):
## append time to avoid overwriting previous submissions
## submission_path=time.strftime("%Y%m%d%H%M%S_")+submission_path
### Make submission
## check sampleSubmission.csv from kaggle website to view submission format
header = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# read first line to know the number of columns and column to use
img_lst = pd.read_csv(test_lst_path,sep="/",header=None, nrows=1)
columns = img_lst.columns.tolist() # get the columns
cols_to_use = columns[len(columns)-1] # drop the last one
cols_to_use= map(int, str(cols_to_use)) ## convert scalar to list
img_lst= pd.read_csv(test_lst_path,sep="/",header=None, usecols=cols_to_use) ## reads lst, use / as sep to get last column with filenames
img_lst=img_lst.values.T.tolist()
df = pd.DataFrame(predictions,columns = header, index=img_lst)
df.index.name = 'image'
print("Saving csv to %s" % submission_path)
df.to_csv(submission_path)
print("Compress with gzip")
os.system("gzip -f %s" % submission_path)
print(" stored in %s.gz" % submission_path)
| apache-2.0 |
sanketloke/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 34 | 25693 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
# Test that bootstrapping samples generate non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test if warm start'ed second fit with smaller n_estimators raises error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
| bsd-3-clause |
danjamker/DiffusionSimulation | MRJobNetworkX-Combine.py | 1 | 3777 | from __future__ import division
import gzip
try:
from BytesIO import BytesIO
except ImportError:
from io import BytesIO
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import hdfs
import networkx as nx
import pandas as pd
from mrjob.job import MRJob
from mrjob.protocol import JSONValueProtocol
from mrjob.step import MRStep
import cascade
import metrics
import json
class MRJobNetworkX(MRJob):
OUTPUT_PROTOCOL = JSONValueProtocol
def configure_options(self):
super(MRJobNetworkX, self).configure_options()
self.add_file_option('--network')
self.add_passthrough_option('--avrage', type='int', default=0, help='...')
def runCascade(self, C):
cas = C
idx = []
values = []
met = metrics.metric(cas.getGraph())
while True:
try:
cas.next()
met.add(cas.getInfectedNode())
values.append(met.asMap())
idx.append(cas.getStep())
except StopIteration:
break
return idx, values
def mapper_init(self):
self.G = nx.read_gpickle(self.options.network)
self.tmp = {node: 0 for node in self.G.nodes()}
nx.set_node_attributes(self.G, 'activated', self.tmp)
def mapper(self, _, line):
nx.set_node_attributes(self.G, 'activated', self.tmp)
client = hdfs.client.Client("http://" + urlparse(line).netloc)
if line[-1] != "#":
with client.read(urlparse(line).path) as r:
# with open(urlparse(line).path) as r:
buf = BytesIO(r.read())
if ".gz" in line:
gzip_f = gzip.GzipFile(fileobj=buf)
content = gzip_f.read()
idx, values = self.runCascade(cascade.actualCascade(BytesIO(content), self.G))
else:
idx, values = self.runCascade(cascade.actualCascade(buf, self.G))
df = pd.DataFrame(values, index=idx)
result_user = df.drop_duplicates(subset='numberActivatedUsers', keep='first').set_index(
['numberActivatedUsers'], verify_integrity=True)
result_act = df.drop_duplicates(subset='numberOfActivations', keep='first').set_index(
['numberOfActivations'], verify_integrity=True)
yield "apple", {"file": line, "name": line.split("/")[-1],
"result_user": result_user.loc[-1:].to_json(orient='records'),
"result_act": result_act.loc[-1:].to_json(orient='records')}
def mappertwo(self, _, line):
yield "apple", json.loads(line)
def reducer(self, key, values):
r_u_l = None
r_a_l = None
for v in values:
print(v)
if r_u_l is None:
r_a_l = pd.read_json(v["result_act"])
r_u_l = pd.read_json(v["result_user"])
else:
r_u_l = pd.concat((r_u_l, pd.read_json(v["result_user"])))
r_a_l = pd.concat((r_a_l, pd.read_json(v["result_act"])))
r_u_l = r_u_l.groupby(r_u_l.index).mean()
r_a_l = r_a_l.groupby(r_a_l.index).mean()
yield key, {"result_user": r_u_l.to_json(), "result_act": r_a_l.to_json()}
def steps(self):
if self.options.avrage == 1:
return [
MRStep(
mapper=self.mappertwo,
reducer=self.reducer
)
]
else:
return [
MRStep(
mapper=self.mappertwo
)
]
if __name__ == '__main__':
MRJobNetworkX.run()
| mit |
ZenDevelopmentSystems/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
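# The value returned above is the double-centered log matrix
# K_ij = L_ij - (row i mean) - (column j mean) + (overall mean): row and
# column main effects are removed from log(X) so that only the interaction
# terms remain.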
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
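# A minimal usage sketch for SpectralCoclustering (not part of the library
# source; the dataset shape and cluster count below are illustrative):
#
#   from sklearn.datasets import make_biclusters
#   from sklearn.cluster.bicluster import SpectralCoclustering
#
#   data, rows, cols = make_biclusters((300, 300), n_clusters=5, random_state=0)
#   model = SpectralCoclustering(n_clusters=5, random_state=0).fit(data)
#   row_labels, column_labels = model.row_labels_, model.column_labels_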
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
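# A minimal usage sketch for the checkerboard case (not part of the library
# source; shapes and cluster counts are illustrative):
#
#   from sklearn.datasets import make_checkerboard
#   from sklearn.cluster.bicluster import SpectralBiclustering
#
#   data, rows, cols = make_checkerboard((300, 300), n_clusters=(4, 3),
#                                        random_state=0)
#   model = SpectralBiclustering(n_clusters=(4, 3), method='log',
#                                random_state=0).fit(data)
#   # model.row_labels_ takes 4 distinct values, model.column_labels_ takes 3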
| bsd-3-clause |
jshleap/StructBio | ShapeSim_old.py | 1 | 7211 | #!/usr/bin/python
'''
Simulator of wing shapes, using Cholesky decomposition to generate correlated variables.
Requires an initial shape in GM format, and a partition file of the form:
Partition
Landmarks: [list of the landmark number in the order of entries in the GM file]
corr: [the amount of correlation within partition 1]
Partition
Landmarks: [list of the landmark number in the order of entries in the GM file]
corr: [the amount of correlation within partition 2]
.
.
.
Partition n
Landmarks: [list of the landmark number in the order of entries in the GM file]
corr: [the amount of correlation within partition n]
'''
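# A hypothetical partition file matching the format described above (landmark
# numbers and correlation values are invented for illustration):
#
#   Partition
#   Landmarks: 1,2,3,4,5
#   corr: 0.8
#   Partition
#   Landmarks: 6,7,8,9,10
#   corr: 0.5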
#importing bit####################################################################################################
from rpy2.robjects import r
import sys, os
from random import normalvariate, shuffle
import matplotlib.pyplot as plt
r('library(corpcor)')
#r('library(MASS)')
# End importing####################################################################################################
#Start functions###################################################################################################
def plot_gm(filename, col):
'''
    Plot a 2D GM file as a scatter plot.
'''
#read and split file
x=[]
y=[]
f = open(filename)
plots = []
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_axis_off()
count=0
for line in f:
count+=1
bline = line.strip().split(';')[1:]
for i in range(0,len(bline),2):
#x.append(bline[i])
#y.append(bline[i+1])
x = float(bline[i])
y = float(bline[i+1])
if col:
lin, = ax.plot(x,y, ls='None', marker='o', color=colors[(i+2)/2])
else:
lin, = ax.plot(x,y, ls='None', marker='o')
if count == 1:
ax.annotate('%s'%(str((i+2)/2)), xy=(x,y), fontsize=14, style='oblique', xytext=(x+2*2,y+2*2))
plt.savefig('%s.png'%(filename[:filename.find('.')]), transparent=True)
def ReadPartitionFile(pfilename):
'''
Take a partition file and extract the landmarks and correlation in each partition
'''
f = open(pfilename).read().strip().split('Partition')
partitions=[]
    # get the landmarks and corr for each partition
for el in f[1:]:
la = el[el.find(': ')+1:el.find('\ncorr:')].strip(' ').split(',')
co = el[el.rfind(': ')+1:].strip(' ').split()
partitions.append((la,co))
landmarks =[]
for p in partitions:
landmarks.extend((int(x) for x in p[0]))
landmarksN = max(landmarks)*2
return partitions, landmarksN
def CreateCorrelationMatrix(partitions, landmarksN):
'''
Given the partition list, creates a correlation matrix in R
'''
r('c <- matrix(NA,%d,%d)'%(landmarksN,landmarksN))
for p in partitions:
tempx=[]
tempy=[]
for e in p[0]:
tempx.append((int(e)*2)-1)
tempy.append((int(e)*2))
#p[0].extend(temp)
for i in range(1,landmarksN+1):
for x in tempx:#p[0]:
if i in tempx:#(int(x) for x in p[0]):
if i == int(x):
r('c[%d,%d]<- %f'%(int(i),int(x),1.0))
else:
r('c[%d,%d]<- %f'%(int(i),int(x),float(p[1][0])))
else:
r('c[%d,%d]<- %f'%(int(i),int(x),0.00))
#for j in range(2,landmarksN+1,2):
for y in tempy:#p[0]:
if i in tempy:#(int(x) for x in p[0]):
if i == int(y):
r('c[%d,%d]<- %f'%(int(i),int(y),1.0))
else:
r('c[%d,%d]<- %f'%(int(i),int(y),float(p[1][0])))
else:
r('c[%d,%d]<- %f'%(int(i),int(y),0.00))
r('png("CorrelationMatrix.png")')
r('hist(c)')
r('dev.off()')
#r('save(c,file ="test" )')
'''def CreateRandomMatrix(landmarksN, samplen, sd):
Create a random matrix which each entry has a given mean and sd
'''
def CreateCorrelatedRandomMatrix(prefix, samplen, sd, landmarksN):
'''
    Reads the shapefile (must have only one entry of the reference shape in GM format,
    and must have a txt extension) and creates a random matrix with appropriate dimensions
and apply cholesky decomposition to create a correlated matrix
'''
r('s <- read.table("%s",sep=";",row.names=1)'%(prefix+'.txt'))# read the shape file
r('m <- matrix(NA,%d,%d)'%(samplen,landmarksN))
for i in range(landmarksN):
r('m[,%d]<-rnorm(%d,rnorm(1,0,%f),%f)'%(i+1,int(samplen),float(sd),float(sd)))
r('s <- matrix(as.numeric(s), %d, dim(s)[2], byrow=TRUE)'%(samplen))#create a matrix repeating the initial shape
#r('m<- matrix(rnorm(dim(s)[1]*dim(s)[2], sd=%f), dim(s)[1],dim(s)[2])'%(sd))#create a random matrix with desired sd
#r('xp<-scale(m)') # Scale the random matrix
    r('t<-m%*%solve(chol(var(m)))')# comment this out if exact correlation is not needed
r('cor.mat<-make.positive.definite(c)') # Transform the correlation matrix in positive definite
    r('t.new<-t%*%chol(cor.mat)') # Create the new correlated variables using Cholesky decomposition of the correlation matrix
#r('t.new[,1]<-t.new[,1]*attr(xp,"scaled:scale")+attr(xp,"scaled:center")') # Create the new dataset with correlation
r('correlated <- t.new+s')
r('write.table(correlated, "%s", sep = ";", row.names = TRUE, col.names = FALSE)'%(prefix+'correlated.gm'))
r('png("CorrelatedMatrix.png")')
r('hist(cor(correlated))')
r('dev.off()')
r('save(c,m,s,t,t.new,pc,file ="test" )')
# End functions####################################################################################################
# Aplication of the code ##########################################################################################
if len(sys.argv) == 1 or '-help' in sys.argv:
print 'usage ShapeSim.py [prefix] [option]'
print 'Options:'
print '\t-sample=XXX : Create XXX samples ( Default: 100 )'
print '\t-mean= YYY : Create a distribution of points with mean YYY ( Default: 0.0 )'
print '\t-sd= ZZZ: Create a distribution of points with ZZZ standard deviation ( Default: 1.00 )'
print '\t-partition=ZZZ : Use ZZZ file to get the partitions to be simulated ( Default: [prefix].par )'
##Default Parameters ###################################################
sd = 1.00
samplen = 100
prefix = sys.argv[1]
pfilename = prefix+'.par'
colors = ['#8b8989','#cdb79e','#000000','#2f4f4f','#d3d3d3','#191970','#0000cd','#87cefa','#b0c4de','#b0c4de',
'#5f9ea0','#66cdaa','#7fffd4','#006400','#556b2f','#8fbc8f','#20b2aa','#7cfc00','#bdb76b','#f0e68c',
'#ffff00','#ffd700','#b8860b','#b8860b','#bc8f8f','#cd5c5c','#8b4513','#a0522d','#b22222','#fa8072',
'#ff0000', '#ff69b4', '#ff1493','#9400d3']
shuffle(colors)
col = False
procrustes = False
## #####################################################################
for arg in sys.argv:
if arg.startswith('-sample='):
samplen = int(arg[8:])
elif arg.startswith('-sd='):
sd = float(arg[4:])
elif arg.startswith('-partition='):
pfilename = arg[11:]
elif arg == '-procrustes':
procrustes = True
samples = []
f=open(prefix+'.txt').read().strip().split(';')
if f[-1] == '':
f=f[1:-1]
else:
f=f[1:]
if len(f) <= len(colors):
col = True
samples.append(f)
plot_gm(prefix+'.txt',col)
partitions, landmarkN = ReadPartitionFile(pfilename)
CreateCorrelationMatrix(partitions, landmarkN)
#CreateRandomMatrix(landmarkN, samplen, sd)
CreateCorrelatedRandomMatrix(prefix, samplen, sd, landmarkN)
plot_gm(prefix+'correlated.gm',col)
if procrustes:
os.system('python /home/jshleap/LabBlouin/code/GM/procrustes.py %s 2'%(prefix+'correlated')) | gpl-3.0 |
icyblade/data_mining_tools | python/icyscaler/__init__.py | 1 | 10769 | #! coding: utf8
import logging
import os
from collections import defaultdict
import coloredlogs
import numpy as np
from scipy.sparse import hstack
from scipy.stats import boxcox
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, OneHotEncoder
class BoxCoxScaler:
"""BoxCoxScaler with data shift."""
def __init__(self, verbose='INFO'):
self.delta = 0.01
self.verbose = verbose
self._configure_logger()
def _configure_logger(self, file=False, stdout=True):
"""Initialize logger.
Parameters
--------
file: bool (default: False).
If True, logs will be written into text file.
stdout: bool (default: True).
If True, logs will be printed onto screen(stdout).
"""
logger = logging.getLogger(self.__class__.__name__)
logger.handlers = [] # clear old handlers
logger.setLevel(self.verbose)
if file: # file logger
if not os.path.exists('./log/'):
os.mkdir('./log/')
handler = logging.FileHandler(
'./log/%s.log' % os.path.basename(self.__class__.__name__)
)
handler.setLevel(self.verbose)
handler.setFormatter(logging.Formatter(
'%(asctime)s %(name)s [%(levelname)s]: %(message)s',
'%H:%M:%S'
))
logger.addHandler(handler)
if stdout: # screen(stdout) logger
coloredlogs.install(
logger=logger,
level=self.verbose,
fmt='%(asctime)s %(name)s [%(levelname)s]: %(message)s',
datefmt='%H:%M:%S',
reconfigure=False,
)
"""
handler = logging.StreamHandler()
handler.setLevel(self.verbose)
handler.setFormatter(logging.Formatter(
'%(asctime)s [%(levelname)s] %(processName)s: %(message)s',
'%H:%M:%S'
))
logger.addHandler(handler)
"""
        logger.setLevel(self.verbose) # fix weird bug for coloredlogs
self.logger = logger
def fit(self, x):
"""Fit!
Parameters
--------
x: ndarray.
Data to be fit, in the shape of (n_samples, n_features).
Returns
--------
self: instance of BoxCoxScaler.
BoxCoxScaler itself.
"""
n_samples, n_features = x.shape
self.lmbdas = np.zeros(n_features)
for i in xrange(n_features):
_, self.lmbdas[i] = boxcox(x[:, i] + self.delta)
return self
def transform(self, x):
"""Transform!
Parameters
--------
x: ndarray.
Data to be transformed, in the shape of (n_samples, n_features)
Returns
--------
y: ndarray.
Transformed data, in the shape of (n_samples, n_features)
"""
n_samples, n_features = x.shape
return np.hstack([
boxcox(x[:, i] + self.delta, self.lmbdas[i]).reshape(-1, 1)
for i in xrange(n_features)
])
def fit_transform(self, x):
"""Fit and transform!
Parameters
--------
x: ndarray.
Data to be transformed, in the shape of (n_samples, n_features)
Returns
--------
y: ndarray.
Transformed data, in the shape of (n_samples, n_features)
"""
return self.fit(x).transform(x)
def inverse_transform(self, y):
"""Do the inverse transform.
Parameters
--------
y: ndarray.
            Data to be inverse transformed, in the shape of (n_samples,
            n_features)
Returns
--------
x: ndarray.
Inverse transformed data, in the shape of (n_samples, n_features)
"""
n_samples, n_features = y.shape
ret = []
for i in xrange(n_features):
            if self.lmbdas[i] == 0:
                # lmbda == 0: the forward transform was log(x + delta)
                ret.append(np.exp(y[:, i]) - self.delta)
            else:
                # general case: x = (lmbda * y + 1) ** (1 / lmbda) - delta;
                # the base is clipped to a small positive value so the log
                # stays defined for out-of-range inputs
                base = np.maximum(self.lmbdas[i] * y[:, i] + 1, 1e-12)
                ret.append(np.exp(np.log(base) / self.lmbdas[i]) - self.delta)
        return np.column_stack(ret)
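# A minimal usage sketch for BoxCoxScaler (Python 2, matching the xrange usage
# above; the data below is invented and non-negative so that the +0.01 shift
# keeps it inside the Box-Cox domain):
#
#   import numpy as np
#   x = np.abs(np.random.randn(200, 3))
#   scaler = BoxCoxScaler()
#   y = scaler.fit_transform(x)           # per-feature Box-Cox on x + 0.01
#   x_back = scaler.inverse_transform(y)  # approximately recovers x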
class IcyScaler:
"""Combination of multiple scalers, used for data preprocessing."""
def __init__(self, verbose='INFO'):
self.verbose = verbose
self._configure_logger()
def _configure_logger(self, file=False, stdout=True):
"""Initialize logger.
Parameters
--------
file: bool (default: False).
If True, logs will be written into text file.
stdout: bool (default: True).
If True, logs will be printed onto screen(stdout).
"""
logger = logging.getLogger(self.__class__.__name__)
logger.handlers = [] # clear old handlers
logger.setLevel(self.verbose)
if file: # file logger
if not os.path.exists('./log/'):
os.mkdir('./log/')
handler = logging.FileHandler(
'./log/%s.log' % os.path.basename(self.__class__.__name__)
)
handler.setLevel(self.verbose)
handler.setFormatter(logging.Formatter(
'%(asctime)s %(name)s [%(levelname)s]: %(message)s',
'%H:%M:%S'
))
logger.addHandler(handler)
if stdout: # screen(stdout) logger
coloredlogs.install(
logger=logger,
level=self.verbose,
fmt='%(asctime)s %(name)s [%(levelname)s]: %(message)s',
datefmt='%H:%M:%S',
reconfigure=False,
)
"""
handler = logging.StreamHandler()
handler.setLevel(self.verbose)
handler.setFormatter(logging.Formatter(
'%(asctime)s [%(levelname)s] %(processName)s: %(message)s',
'%H:%M:%S'
))
logger.addHandler(handler)
"""
        logger.setLevel(self.verbose) # fix weird bug for coloredlogs
self.logger = logger
def fit(self, x, categorial_columns=None, continuous_columns=None):
"""Fit!
Parameters
--------
x: pandas.DataFrame.
Data to be fit, in the shape of (n_samples, n_features).
categorial_columns: list of str (default: None).
List of categorial columns. If None, categorial columns will be
extracted automatically.
continuous_columns: list of str (default: None).
List of continuous columns. If None, continuous columns will be
extracted automatically.
Returns
--------
self: instance of IcyScaler.
IcyScaler itself.
"""
categorial_columns = categorial_columns or self._extract_categorial(x)
continuous_columns = continuous_columns or self._extract_continuous(x)
# encode labels
if categorial_columns:
categorial_data = x[categorial_columns]
self.label_encoders = defaultdict(LabelEncoder)
categorial_data = categorial_data.apply(
lambda column:
self.label_encoders[column.name].fit_transform(column)
).values
self.one_hot_encoder = OneHotEncoder().fit(categorial_data)
# encode continuous
if continuous_columns:
continuous_data = x[continuous_columns].values
self.box_cox_scaler = BoxCoxScaler().fit(continuous_data)
continuous_data = self.box_cox_scaler.transform(continuous_data)
self.min_max_scaler = MinMaxScaler().fit(continuous_data)
return self
def transform(self, x, categorial_columns=None, continuous_columns=None):
"""Transform!
Parameters
--------
x: ndarray.
Data to be transformed, in the shape of (n_samples, n_features)
categorial_columns: list of str (default: None).
List of categorial columns. If None, categorial columns will be
extracted automatically.
continuous_columns: list of str (default: None).
List of continuous columns. If None, continuous columns will be
extracted automatically.
Returns
--------
y: ndarray.
Transformed data, in the shape of (n_samples, n_features).
WARNING: order of columns will be changed!
"""
categorial_columns = categorial_columns or self._extract_categorial(x)
continuous_columns = continuous_columns or self._extract_continuous(x)
categorial_data = continuous_data = None
# transform labels
if categorial_columns:
categorial_data = x[categorial_columns]
categorial_data = categorial_data.apply(
lambda column:
self.label_encoders[column.name].transform(column)
).values
categorial_data = self.one_hot_encoder.transform(categorial_data)
# transform continuous
if continuous_columns:
continuous_data = x[continuous_columns].values
continuous_data = self.box_cox_scaler.transform(continuous_data)
continuous_data = self.min_max_scaler.transform(continuous_data)
try:
return hstack([
i
for i in [categorial_data, continuous_data]
if i is not None
])
except ValueError: # dense matrix
return np.concatenate([
i
for i in [categorial_data, continuous_data]
if i is not None
], axis=1)
def fit_transform(self, x):
return self.fit(x).transform(x)
def _extract_categorial(self, x):
"""Extract categorial columns.
Parameters
--------
x: pandas.DataFrame.
DataFrame to be extracted.
Returns
--------
lst: list of str.
Categorial column names.
"""
lst = x.columns[x.dtypes == object].tolist()
self.logger.info('%s categorial columns found.' % len(lst))
return lst
def _extract_continuous(self, x):
"""Extract continuous columns.
Parameters
--------
x: pandas.DataFrame.
DataFrame to be extracted.
Returns
--------
lst: list of str.
Continuous column names.
"""
lst = x.columns[x.dtypes != object].tolist()
self.logger.info('%s continuous columns found.' % len(lst))
return lst
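# A minimal usage sketch for IcyScaler (column names and values are invented
# for illustration):
#
#   import pandas as pd
#   df = pd.DataFrame({'city': ['a', 'b', 'a', 'c'],
#                      'income': [1.2, 3.4, 2.2, 0.7]})
#   features = IcyScaler().fit_transform(df)
#   # 'city' is label- plus one-hot-encoded, 'income' is Box-Cox and min-max
#   # scaled; note that the output column order differs from the input.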
| gpl-3.0 |
Akshay0724/scikit-learn | sklearn/svm/tests/test_sparse.py | 63 | 13366 | import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import (assert_raises, assert_true, assert_false,
assert_warns, assert_raise_message,
ignore_warnings)
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/nolearn-0.5/nolearn/cache.py | 2 | 4547 | """This module contains a decorator :func:`cached` that can be used to
cache the results of any Python functions to disk.
This is useful when you have functions that take a long time to
compute their value, and you want to cache the results of those
functions between runs.
Python's :mod:`pickle` is used to serialize data. All cache files go
into the `cache/` directory inside your working directory.
`@cached` uses a cache key function to find out if it has the value
for some given function arguments cached on disk. The way it
calculates that cache key by default is to simply use the string
representation of all arguments passed into the function. Thus, the
default cache key function looks like this:
.. code-block:: python
def default_cache_key(*args, **kwargs):
return str(args) + str(sorted(kwargs.items()))
Here is an example use of the :func:`cached` decorator:
.. code-block:: python
import math
@cached()
def fac(x):
print 'called!'
return math.factorial(x)
fac(20)
called!
2432902008176640000
fac(20)
2432902008176640000
Often you will want to use a more intelligent cache key, one that
takes more things into account. Here's an example cache key function
for a cache decorator used with a `transform` method of a scikit-learn
:class:`~sklearn.base.BaseEstimator`:
.. doctest::
>>> def transform_cache_key(self, X):
... return ','.join([
... str(X[:20]),
... str(X[-20:]),
... str(X.shape),
... str(sorted(self.get_params().items())),
... ])
This function puts the first and the last twenty rows of the matrix
`X` into the cache key. On top of that, it adds the shape of the
matrix `X.shape` along with the items in `self.get_params`, which with
a scikit-learn :class:`~sklearn.base.BaseEstimator` class is the
dictionary of model parameters. This makes sure that even though the
input matrix is the same, it will still calculate the value again if
the value of `self.get_params()` is different.
Your estimator class can then use the decorator like so:
.. code-block:: python
class MyEstimator(BaseEstimator):
@cached(transform_cache_key)
def transform(self, X):
# ...
"""
from functools import wraps
import hashlib
import logging
import random
import os
import string
import traceback
from joblib import numpy_pickle
CACHE_PATH = 'cache/'
if not os.path.exists(CACHE_PATH): # pragma: no cover
os.mkdir(CACHE_PATH)
logger = logging.getLogger(__name__)
def default_cache_key(*args, **kwargs):
return str(args) + str(sorted(kwargs.items()))
class DontCache(Exception):
pass
def cached(cache_key=default_cache_key, cache_path=None):
def cached(func):
@wraps(func)
def wrapper(*args, **kwargs):
# Calculation of the cache key is delegated to a function
# that's passed in via the decorator call
# (`default_cache_key` by default).
try:
key = str(cache_key(*args, **kwargs))
except DontCache:
return func(*args, **kwargs)
hashed_key = hashlib.sha1(key).hexdigest()[:8]
# We construct the filename using the cache key. If the
# file exists, unpickle and return the value.
filename = os.path.join(
cache_path or CACHE_PATH,
'{}.{}-cache-{}'.format(
func.__module__, func.__name__, hashed_key))
if os.path.exists(filename):
filesize = os.path.getsize(filename)
size = "%0.1f MB" % (filesize / (1024 * 1024.0))
logger.debug(" * cache hit: {} ({})".format(filename, size))
return numpy_pickle.load(filename)
else:
logger.debug(" * cache miss: {}".format(filename))
value = func(*args, **kwargs)
tmp_filename = '{}-{}.tmp'.format(
filename,
''.join(random.sample(string.ascii_letters, 4)),
)
try:
numpy_pickle.dump(value, tmp_filename, compress=9)
os.rename(tmp_filename, filename)
except Exception:
logger.exception(
"Saving pickle {} resulted in Exception".format(
filename))
return value
wrapper.uncached = func
return wrapper
return cached
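# A small illustrative sketch of the DontCache escape hatch (the key function
# and the size threshold are hypothetical): raising DontCache from the
# cache-key function makes the wrapper call the wrapped function directly,
# skipping the on-disk cache for that particular call.
#
#   def size_aware_key(X):
#       if len(X) < 1000:  # too cheap to be worth caching
#           raise DontCache()
#       return str(X[:20]) + str(len(X))
#
#   @cached(size_aware_key)
#   def expensive_transform(X):
#       ...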
| bsd-3-clause |
0x0all/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
rvraghav93/scikit-learn | examples/cluster/plot_mean_shift.py | 53 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
# #############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
# #############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
# #############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
ZENGXH/scikit-learn | sklearn/decomposition/dict_learning.py | 83 | 44062 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars" '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
                         % algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None:
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init, max_iter=max_iter)
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
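# A minimal usage sketch for sparse_encode (not part of the library source;
# shapes are illustrative, and the dictionary rows are normalized as several
# of the algorithms assume):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   D = rng.randn(15, 64)
#   D /= np.linalg.norm(D, axis=1)[:, np.newaxis]
#   X = rng.randn(10, 64)
#   code = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=3)
#   # code has shape (10, 15) and np.dot(code, D) approximates X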
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
        Pseudo-random number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
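    Examples
    --------
    A small, hedged sketch, assuming the public ``sklearn.decomposition``
    import path; only the output shapes are checked:
    >>> import numpy as np
    >>> from sklearn.decomposition import dict_learning
    >>> X = np.random.RandomState(0).randn(10, 8)
    >>> code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.,
    ...                                          random_state=0)
    >>> code.shape, dictionary.shape
    ((10, 5), (5, 8))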
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
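    Examples
    --------
    A minimal sketch, assuming the public ``sklearn.decomposition`` import
    path; only the output shapes are checked:
    >>> import numpy as np
    >>> from sklearn.decomposition import dict_learning_online
    >>> X = np.random.RandomState(42).randn(20, 10)
    >>> code, dictionary = dict_learning_online(X, n_components=5, alpha=1.,
    ...                                         random_state=42)
    >>> code.shape, dictionary.shape
    ((20, 5), (5, 10))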
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
        raise ValueError('Coding method %r not supported as a fit '
                         'algorithm.' % method)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. The rows are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
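    Examples
    --------
    A hedged usage sketch with a trivial identity dictionary, assuming the
    public ``sklearn.decomposition`` import path:
    >>> import numpy as np
    >>> from sklearn.decomposition import SparseCoder
    >>> D = np.eye(4)  # rows already have unit norm
    >>> X = np.array([[1., 0., -2., 0.]])
    >>> coder = SparseCoder(D, transform_algorithm='threshold',
    ...                     transform_alpha=0.5)
    >>> coder.fit(X).transform(X).shape
    (1, 4)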
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
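    Examples
    --------
    A minimal, hedged sketch, assuming the public ``sklearn.decomposition``
    import path; only the shape of the transformed data is checked:
    >>> import numpy as np
    >>> from sklearn.decomposition import DictionaryLearning
    >>> X = np.random.RandomState(0).randn(12, 6)
    >>> dico = DictionaryLearning(n_components=4, alpha=1., max_iter=20,
    ...                           random_state=0)
    >>> dico.fit_transform(X).shape
    (12, 4)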
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
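    Examples
    --------
    A hedged sketch, assuming the public ``sklearn.decomposition`` import
    path; only the shape of the learned dictionary is checked:
    >>> import numpy as np
    >>> from sklearn.decomposition import MiniBatchDictionaryLearning
    >>> X = np.random.RandomState(0).randn(30, 8)
    >>> mbdl = MiniBatchDictionaryLearning(n_components=5, alpha=1.,
    ...                                    n_iter=20, random_state=0)
    >>> mbdl.fit(X).components_.shape
    (5, 8)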
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
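        Examples
        --------
        A hedged sketch of incremental fitting on successive mini-batches,
        assuming the public ``sklearn.decomposition`` import path:
        >>> import numpy as np
        >>> from sklearn.decomposition import MiniBatchDictionaryLearning
        >>> rng = np.random.RandomState(0)
        >>> mbdl = MiniBatchDictionaryLearning(n_components=5, n_iter=10,
        ...                                    random_state=0)
        >>> for _ in range(3):
        ...     mbdl = mbdl.partial_fit(rng.randn(10, 8))
        >>> mbdl.components_.shape
        (5, 8)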
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
| bsd-3-clause |
ipashchenko/ml4vs | ml4vs/ensembling.py | 1 | 11104 | import os
import numpy as np
import sys
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from sklearn import decomposition, pipeline
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
# NN
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras import callbacks
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.svm import SVC
from data_load import load_data, load_data_tgt
data_dir = '/home/ilya/code/ml4vs/data/LMC_SC20__corrected_list_of_variables/raw_index_values'
file_1 = 'vast_lightcurve_statistics_variables_only.log'
file_0 = 'vast_lightcurve_statistics_constant_only.log'
file_0 = os.path.join(data_dir, file_0)
file_1 = os.path.join(data_dir, file_1)
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
names_to_delete = ['meaningless_1', 'meaningless_2', 'star_ID',
'Npts', 'CSSD', 'clipped_sigma', 'lag1', 'L', 'Lclp', 'Jclp',
'MAD', 'Ltim']
X, y, df, feature_names, delta = load_data([file_0, file_1], names,
names_to_delete)
target = 'variable'
predictors = list(df)
predictors.remove(target)
# Split on train/test
sss = StratifiedShuffleSplit(y, n_iter=1, test_size=0.25, random_state=123)
for train_index, test_index in sss:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# Split on train1/train2
sss_ = StratifiedShuffleSplit(y_train, n_iter=1, test_size=0.5,
random_state=123)
for train_index, test_index in sss_:
X_train_, X_test_ = X_train[train_index], X_train[test_index]
y_train_, y_test_ = y_train[train_index], y_train[test_index]
# Fit algos on train_
# Function that transforms some features
def log_axis(X_, names=None):
X = X_.copy()
tr_names = ['clipped_sigma', 'weighted_sigma', 'RoMS', 'rCh2', 'Vp2p',
'Ex', 'inv_eta', 'S_B']
for name in tr_names:
try:
# print "Log-Transforming {}".format(name)
i = names.index(name)
X[:, i] = np.log(X[:, i])
except ValueError:
print "No {} in predictors".format(name)
pass
return X
# Create model for NN
def create_baseline():
model = Sequential()
model.add(Dense(18, input_dim=18, init='normal', activation='relu',
W_constraint=maxnorm(9.388)))
model.add(Dropout(0.04))
model.add(Dense(13, init='normal', activation='relu',
W_constraint=maxnorm(2.72)))
# model.add(Activation(space['Activation']))
model.add(Dropout(0.09))
model.add(Dense(1, init='normal', activation='sigmoid'))
# Compile model
learning_rate = 0.213
decay_rate = 0.001
momentum = 0.9
sgd = SGD(lr=learning_rate, decay=decay_rate, momentum=momentum,
nesterov=False)
model.compile(loss='binary_crossentropy', optimizer=sgd,
metrics=['accuracy'])
return model
earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=50,
verbose=1, mode='auto')
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
estimators.append(('scaler', StandardScaler()))
estimators.append(('mlp', KerasClassifier(build_fn=create_baseline,
nb_epoch=175,
batch_size=1024,
verbose=2)))
pipeline_nn = Pipeline(estimators)
# Create model for GB
sys.path.append('/home/ilya/xgboost/xgboost/python-package/')
import xgboost as xgb
clf = xgb.XGBClassifier(n_estimators=87, learning_rate=0.111,
max_depth=6,
min_child_weight=2,
subsample=0.275,
colsample_bytree=0.85,
colsample_bylevel=0.55,
gamma=3.14,
scale_pos_weight=6,
max_delta_step=6)
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
estimators.append(('clf', clf))
pipeline_xgb = Pipeline(estimators)
# Create model for RF
clf = RandomForestClassifier(n_estimators=1200,
max_depth=17,
max_features=3,
min_samples_split=2,
min_samples_leaf=3,
class_weight='balanced_subsample',
verbose=1, random_state=1, n_jobs=4)
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
estimators.append(('clf', clf))
pipeline_rf = Pipeline(estimators)
# Create model for LR
clf = LogisticRegression(C=1.29, class_weight={0: 1, 1: 2},
random_state=1, max_iter=300, n_jobs=1,
tol=10.**(-5))
pca = decomposition.PCA(n_components=16, random_state=1)
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
estimators.append(('func', FunctionTransformer(log_axis, kw_args={'names':
predictors})))
estimators.append(('scaler', StandardScaler()))
estimators.append(('pca', pca))
estimators.append(('clf', clf))
pipeline_lr = Pipeline(estimators)
# Model for kNN
clf = KNeighborsClassifier(n_neighbors=6,
weights='distance', n_jobs=4)
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
estimators.append(('scaler', StandardScaler()))
estimators.append(('clf', clf))
pipeline_knn = Pipeline(estimators)
# Model for SVM
clf = SVC(C=37.3, class_weight={0: 1, 1: 3}, probability=True,
gamma=0.0126, random_state=1)
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
estimators.append(('scaler', StandardScaler()))
estimators.append(('clf', clf))
pipeline_svm = Pipeline(estimators)
# Fit on all training data
pipeline_lr.fit(X, y)
pipeline_knn.fit(X, y)
pipeline_rf.fit(X, y)
pipeline_xgb.fit(X, y)
pipeline_nn.fit(X, y)
pipeline_svm.fit(X, y)
# Load blind test data
file_tgt = 'LMC_SC19_PSF_Pgood98__vast_lightcurve_statistics.log'
file_tgt = os.path.join(data_dir, file_tgt)
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
names_to_delete = ['meaningless_1', 'meaningless_2', 'star_ID',
'Npts', 'CSSD', 'clipped_sigma', 'lag1', 'L', 'Lclp', 'Jclp',
'MAD', 'Ltim']
X_tgt, feature_names, df, df_orig = load_data_tgt(file_tgt, names,
names_to_delete, delta)
# Predict for different algos
y_pred_rf = pipeline_rf.predict(X_tgt)
y_pred_lr = pipeline_lr.predict(X_tgt)
y_pred_knn = pipeline_knn.predict(X_tgt)
y_pred_xgb = pipeline_xgb.predict(X_tgt)
y_pred_nn = pipeline_nn.predict(X_tgt)[:, 0]
y_pred_svm = pipeline_svm.predict(X_tgt)
# Probabilities
y_prob_rf = pipeline_rf.predict_proba(X_tgt)[:, 1]
y_prob_lr = pipeline_lr.predict_proba(X_tgt)[:, 1]
y_prob_knn = pipeline_knn.predict_proba(X_tgt)[:, 1]
y_prob_xgb = pipeline_xgb.predict_proba(X_tgt)[:, 1]
y_prob_nn = pipeline_nn.predict_proba(X_tgt)[:, 1]
y_prob_svm = pipeline_svm.predict_proba(X_tgt)[:, 1]
y_preds = (0.67*y_pred_lr + 0.68*y_pred_knn + 0.81*y_pred_xgb +
0.76*y_pred_rf + 0.81*y_pred_nn + 0.79*y_pred_svm) / 4.52
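# The 4.52 divisor above is the sum of the per-model weights
# (0.67 + 0.68 + 0.81 + 0.76 + 0.81 + 0.79), so y_preds is a weighted
# average of the six individual predictions.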
y_preds = np.asarray(y_preds, dtype=float)
# y_preds /= 6
idx = y_preds >= 0.5
idx_ = y_preds < 0.5
def rank(y_probs):
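    """Combine per-model probabilities by summing their ranks and rescaling to [0, 1]."""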
from scipy.stats import rankdata
ranks = np.zeros(len(y_probs[0]), dtype=float)
for y_prob in y_probs:
rank = rankdata(y_prob, method='min') - 1
ranks += rank
return ranks / max(ranks)
# new_prob = rank([y_prob_lr, y_prob_knn, y_prob_nn, y_prob_rf, y_prob_xgb,
# y_pred_svm])
# idx = new_prob >= 0.95
# idx_ = new_prob < 0.95
# idx = y_probs > 0.250
# idx_ = y_probs < 0.250
ens_no = list(df_orig['star_ID'][idx_])
print("Found {} variables".format(np.count_nonzero(idx)))
with open('ens_results_final.txt', 'w') as fo:
for line in list(df_orig['star_ID'][idx]):
fo.write(line + '\n')
# Check F1
with open('clean_list_of_new_variables.txt', 'r') as fo:
news = fo.readlines()
news = [line.strip().split(' ')[1] for line in news]
news = set(news)
with open('ens_results_final.txt', 'r') as fo:
ens = fo.readlines()
ens = [line.strip().split('_')[4].split('.')[0] for line in ens]
ens = set(ens)
print "Among new vars found {}".format(len(news.intersection(ens)))
with open('candidates_50perc_threshold.txt', 'r') as fo:
c50 = fo.readlines()
c50 = [line.strip("\", ', \", \n, }, {") for line in c50]
with open('variables_not_in_catalogs.txt', 'r') as fo:
not_in_cat = fo.readlines()
nic = [line.strip().split(' ')[1] for line in not_in_cat]
# Catalogue variables
cat_vars = set(c50).difference(set(nic))
# Non-catalogue variable
noncat_vars = set([line.strip().split(' ')[1] for line in not_in_cat if 'CST' not in line])
# All variables
all_vars = news.union(cat_vars).union(noncat_vars)
ens_no = set([line.strip().split('_')[4].split('.')[0] for line in ens_no])
found_bad = '181193' in ens
print "Found known variable : ", found_bad
FN = len(ens_no.intersection(all_vars))
TP = len(all_vars.intersection(ens))
TN = len(ens_no) - FN
FP = len(ens) - TP
recall = float(TP) / (TP + FN)
precision = float(TP) / (TP + FP)
F1 = 2 * precision * recall / (precision + recall)
print "precision: {}".format(precision)
print "recall: {}".format(recall)
print "F1: {}".format(F1)
print "TN={}, FP={}".format(TN, FP)
print "FN={}, TP={}".format(FN, TP)
| mit |
EPFL-LCN/neuronaldynamics-exercises | neurodynex3/adex_model/AdEx.py | 1 | 5502 | """
Implementation of the Adaptive Exponential Integrate-and-Fire model.
See Neuronal Dynamics
`Chapter 6 Section 1 <http://neuronaldynamics.epfl.ch/online/Ch6.S1.html>`_
"""
# This file is part of the exercise code repository accompanying
# the book: Neuronal Dynamics (see http://neuronaldynamics.epfl.ch)
# located at http://github.com/EPFL-LCN/neuronaldynamics-exercises.
# This free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License 2.0 as published by the
# Free Software Foundation. You should have received a copy of the
# GNU General Public License along with the repository. If not,
# see http://www.gnu.org/licenses/.
# Should you reuse and publish the code for your own purposes,
# please cite the book or point to the webpage http://neuronaldynamics.epfl.ch.
# Wulfram Gerstner, Werner M. Kistler, Richard Naud, and Liam Paninski.
# Neuronal Dynamics: From Single Neurons to Networks and Models of Cognition.
# Cambridge University Press, 2014.
import brian2 as b2
import matplotlib.pyplot as plt
import neurodynex3.tools.input_factory as input_factory
b2.defaultclock.dt = 0.01 * b2.ms
# default values. (see Table 6.1, Initial Burst)
# http://neuronaldynamics.epfl.ch/online/Ch6.S2.html#Ch6.F3
MEMBRANE_TIME_SCALE_tau_m = 5 * b2.ms
MEMBRANE_RESISTANCE_R = 500 * b2.Mohm
V_REST = -70.0 * b2.mV
V_RESET = -51.0 * b2.mV
RHEOBASE_THRESHOLD_v_rh = -50.0 * b2.mV
SHARPNESS_delta_T = 2.0 * b2.mV
ADAPTATION_VOLTAGE_COUPLING_a = 0.5 * b2.nS
ADAPTATION_TIME_CONSTANT_tau_w = 100.0 * b2.ms
SPIKE_TRIGGERED_ADAPTATION_INCREMENT_b = 7.0 * b2.pA
# a technical threshold to tell the algorithm when to reset vm to v_reset
FIRING_THRESHOLD_v_spike = -30. * b2.mV
# This function implements the Adaptive Exponential Integrate-and-Fire (AdEx) neuron model
def simulate_AdEx_neuron(
tau_m=MEMBRANE_TIME_SCALE_tau_m,
R=MEMBRANE_RESISTANCE_R,
v_rest=V_REST,
v_reset=V_RESET,
v_rheobase=RHEOBASE_THRESHOLD_v_rh,
a=ADAPTATION_VOLTAGE_COUPLING_a,
b=SPIKE_TRIGGERED_ADAPTATION_INCREMENT_b,
v_spike=FIRING_THRESHOLD_v_spike,
delta_T=SHARPNESS_delta_T,
tau_w=ADAPTATION_TIME_CONSTANT_tau_w,
I_stim=input_factory.get_zero_current(),
simulation_time=200 * b2.ms):
r"""
Implementation of the AdEx model with a single adaptation variable w.
The Brian2 model equations are:
.. math::
\tau_m \frac{dv}{dt} = -(v-v_{rest}) + \Delta_T \cdot
e^{\frac{v-v_{rheobase}}{\Delta_T}} + R I_{stim}(t,i) - R w \\
\tau_w \frac{dw}{dt} = a (v-v_{rest}) - w
Args:
tau_m (Quantity): membrane time scale
    R (Quantity): membrane resistance
v_rest (Quantity): resting potential
v_reset (Quantity): reset potential
v_rheobase (Quantity): rheobase threshold
a (Quantity): Adaptation-Voltage coupling
b (Quantity): Spike-triggered adaptation current (=increment of w after each spike)
v_spike (Quantity): voltage threshold for the spike condition
delta_T (Quantity): Sharpness of the exponential term
tau_w (Quantity): Adaptation time constant
I_stim (TimedArray): Input current
simulation_time (Quantity): Duration for which the model is simulated
Returns:
(state_monitor, spike_monitor):
A b2.StateMonitor for the variables "v" and "w" and a b2.SpikeMonitor
"""
v_spike_str = "v>{:f}*mvolt".format(v_spike / b2.mvolt)
# EXP-IF
eqs = """
dv/dt = (-(v-v_rest) +delta_T*exp((v-v_rheobase)/delta_T)+ R * I_stim(t,i) - R * w)/(tau_m) : volt
dw/dt=(a*(v-v_rest)-w)/tau_w : amp
"""
neuron = b2.NeuronGroup(1, model=eqs, threshold=v_spike_str, reset="v=v_reset;w+=b", method="euler")
# initial values of v and w is set here:
neuron.v = v_rest
neuron.w = 0.0 * b2.pA
# Monitoring membrane voltage (v) and w
state_monitor = b2.StateMonitor(neuron, ["v", "w"], record=True)
spike_monitor = b2.SpikeMonitor(neuron)
# running simulation
b2.run(simulation_time)
return state_monitor, spike_monitor
def plot_adex_state(adex_state_monitor):
"""
Visualizes the state variables: w-t, v-t and phase-plane w-v
Args:
adex_state_monitor (StateMonitor): States of "v" and "w"
"""
plt.subplot(2, 2, 1)
plt.plot(adex_state_monitor.t / b2.ms, adex_state_monitor.v[0] / b2.mV, lw=2)
plt.xlabel("t [ms]")
plt.ylabel("u [mV]")
plt.title("Membrane potential")
plt.subplot(2, 2, 2)
plt.plot(adex_state_monitor.v[0] / b2.mV, adex_state_monitor.w[0] / b2.pA, lw=2)
plt.xlabel("u [mV]")
plt.ylabel("w [pAmp]")
plt.title("Phase plane representation")
plt.subplot(2, 2, 3)
plt.plot(adex_state_monitor.t / b2.ms, adex_state_monitor.w[0] / b2.pA, lw=2)
plt.xlabel("t [ms]")
plt.ylabel("w [pAmp]")
plt.title("Adaptation current")
plt.show()
def getting_started():
"""
Simple example to get started
"""
from neurodynex3.tools import plot_tools
current = input_factory.get_step_current(10, 200, 1. * b2.ms, 65.0 * b2.pA)
state_monitor, spike_monitor = simulate_AdEx_neuron(I_stim=current, simulation_time=300 * b2.ms)
plot_tools.plot_voltage_and_current_traces(state_monitor, current)
plot_adex_state(state_monitor)
print("nr of spikes: {}".format(spike_monitor.count[0]))
if __name__ == "__main__":
getting_started()
| gpl-2.0 |
natsheh/semantic_query | index_answers.py | 1 | 7213 | # -*- coding: utf-8 -*-
#
# This file is part of semantic_query.
# Copyright (C) 2016 CIAPPLE.
#
# This is a free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
# Load and index answers from corpus
# Author: Hussein AL-NATSHEH <[email protected]>
# Affiliation: CIAPPLE, Jordan
import os, argparse, pickle, json
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD, MiniBatchDictionaryLearning
import numpy as np
from stop_words import get_stop_words
import IPython
def count_docs (m_corpus, w_corpus, paragraphs_per_article):
articles_count = 0
docs_count = 0
for sub in os.listdir(m_corpus):
subdir = os.path.join(m_corpus, sub)
for fname in os.listdir(subdir):
articles_count += 1
doc = ''
for i, line in enumerate(open(os.path.join(subdir, fname))):
if len(doc) >300:
doc = ''
docs_count += 1
if i == 0:
title = str(line)
if i == 1:
line1 = title + '__'+ str(line)
doc = line1
if i > 1:
doc = line1+' '+str(line)
if w_corpus is not None:
for sub in os.listdir(w_corpus):
subdir = os.path.join(w_corpus, sub)
for fname in os.listdir(subdir):
articles_count += 1
doc = ''
for i, line in enumerate(open(os.path.join(subdir, fname))):
if len(doc) >300:
doc = ''
docs_count += 1
if i == 0:
title = str(line)
if i == 1:
line1 = title + '__'+ str(line)
doc = line1
docs_count += 1
if i == paragraphs_per_article:
break
if i > 1:
doc = line1+' '+str(line)
return docs_count, articles_count
def load_corpus (m_corpus, w_corpus, docs_count, paragraphs_per_article):
docs = np.array(range(docs_count), dtype=np.object)
doc_id = 0
index = dict()
for sub in os.listdir(m_corpus):
subdir = os.path.join(m_corpus, sub)
for fname in os.listdir(subdir):
article_id = 'm'+'_'+str(fname[:-4])
doc = ''
for i, line in enumerate(open(os.path.join(subdir, fname))):
if len(doc) > 300:
docs[doc_id] = unicode(doc, 'utf8')
doc = ''
index[doc_id] = str(article_id)+'_'+str(i)
doc_id += 1
if i == 0:
title = str(line)
if i == 1:
line1 = title+'__'+str(line)
doc = line1
if i > 1:
doc = line1+' '+str(line)
if w_corpus is not None:
for sub in os.listdir(w_corpus):
subdir = os.path.join(w_corpus, sub)
for fname in os.listdir(subdir):
article_id = 'w'+'_'+str(fname[:-4])
for i, line in enumerate(open(os.path.join(subdir, fname))):
if len(doc) > 300:
docs[doc_id] = unicode(doc, 'utf8')
doc = ''
index[doc_id] = str(article_id)+'_'+str(i)
doc_id += 1
if i == 0:
title = line
if i == 1:
line1 = title+'__'+str(line)
doc = line1
if i == paragraphs_per_article:
break
if i > 1:
doc = line1+' '+str(line)
return docs, index
if __name__ == "__main__" :
parser = argparse.ArgumentParser()
parser.add_argument("--m_corpus", default='../m_output_parser', type=str) # path to m corpus
parser.add_argument("--w_corpus", default='None', type=str) # path to w corpus
parser.add_argument("--paragraphs_per_article", default=4, type=int) # max number of paragraphs per article to load from w corpus
parser.add_argument("--vectorizer_type", default="tfidf", type=str) # possible values: "tfidf" and "count"
parser.add_argument("--decomposition_type", default="svd", type=str) # possible values: "svd", "mbdl" or "None"
parser.add_argument("--mx_ngram", default=2, type=int) # the upper bound of the ngram range
parser.add_argument("--mn_ngram", default=1, type=int) # the lower bound of the ngram range
parser.add_argument("--stop_words", default=1, type=int) # filtering out English stop-words
parser.add_argument("--min_count", default=5, type=int) # minimum frequency of the token to be included in the vocabulary
parser.add_argument("--max_df", default=0.98, type=float) # how much vocabulary percent to keep at max based on frequency
parser.add_argument("--vec_size", default=350, type=int) # the size of the vector in the semantics space
parser.add_argument("--transformed_file", default='transformed.pickle', type=str) # load dumped transformed vectors (pickle file)
parser.add_argument("--docs_file", default='documents.pickle', type=str) # documents file
parser.add_argument("--index_file", default='index.pickle', type=str) # index file
parser.add_argument("--transformer_file", default='transformer.pickle', type=str) # transformer file
parser.add_argument("--debug", default=0, type=int) # IPython embed
args = parser.parse_args()
m_corpus = args.m_corpus
w_corpus = args.w_corpus
if w_corpus == 'None':
w_corpus = None
paragraphs_per_article = args.paragraphs_per_article
vectorizer_type = args.vectorizer_type
decomposition_type = args.decomposition_type
mx_ngram = args.mx_ngram
mn_ngram = args.mn_ngram
stop_words = args.stop_words
if stop_words:
stop_words = get_stop_words('ar')
else:
stop_words = None
min_count = args.min_count
max_df = args.max_df
n_components = args.vec_size
transformed_file = args.transformed_file
docs_file = args.docs_file
index_file = args.index_file
transformer_file = args.transformer_file
debug = args.debug
docs_count, articles_count = count_docs (m_corpus, w_corpus, paragraphs_per_article)
documents, index = load_corpus (m_corpus, w_corpus, docs_count, paragraphs_per_article)
print 'number of documents :', docs_count, ' number of articles :',articles_count
if vectorizer_type == "count":
vectorizer = CountVectorizer(input='content',
analyzer='word', stop_words=stop_words, min_df=min_count,
ngram_range=(mn_ngram, mx_ngram), max_df=max_df)
elif vectorizer_type == "tfidf":
vectorizer = TfidfVectorizer(input='content', decode_error='ignore',
analyzer='word', stop_words=stop_words, min_df=min_count,
ngram_range=(mn_ngram, mx_ngram), max_df=max_df, lowercase=False)
else:
raise NameError('Please check your vectorizer option. It must be either "tfidf" or "count"')
if decomposition_type == 'mbdl':
decomposer = MiniBatchDictionaryLearning(n_components=n_components)
elif decomposition_type == 'svd':
decomposer = TruncatedSVD(n_components=n_components, n_iter=5, random_state=42)
else:
decomposer = None
if decomposer is not None:
transformer = Pipeline(steps=[('vectorizer',vectorizer), ('decomposer',decomposer)])
else:
transformer = vectorizer
if decomposition_type == 'mbdl':
transformed = transformer.steps[1][1].fit_transform(transformer.steps[0][1].fit_transform(documents).toarray())
else:
transformed = transformer.fit_transform(documents)
vocab = transformer.steps[0][1].get_feature_names()
print 'size of the vocabulary:', len(vocab)
print 'shape of the transformed bag_of_words', transformed.shape
transformed.dump(transformed_file)
pickle.dump(index,open(index_file,'wb'))
pickle.dump(documents,open(docs_file,'wb'))
pickle.dump(transformer,open(transformer_file,'wb'))
if debug:
IPython.embed()
| bsd-3-clause |
Srisai85/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` makes it possible to drastically reduce the minimal number
of dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
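For example, plugging ``n_samples = 500`` and ``eps = 0.1`` into the bound
above gives ``4 * log(500) / (0.1 ** 2 / 2 - 0.1 ** 3 / 3)``, i.e. roughly
5300 dimensions, which is where the "several thousands" figure comes from.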
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/neighbors/graph.py | 36 | 6650 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self):
"""Return the query based on include_self param"""
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self : bool, default=False.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self : bool, default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.radius_neighbors_graph(query, radius, mode)
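if __name__ == "__main__":
    # Hedged usage sketch appended for illustration; it is not part of the
    # original scikit-learn module. It simply exercises the two public helpers
    # defined above on a toy dataset.
    import numpy as np
    X = np.array([[0.0], [3.0], [1.0]])
    # Weighted k-NN graph: entries hold distances rather than 0/1 connectivity.
    print(kneighbors_graph(X, n_neighbors=2, mode='distance',
                           include_self=False).toarray())
    # Radius graph keeping only pairs closer than 1.5 units.
    print(radius_neighbors_graph(X, radius=1.5, mode='connectivity',
                                 include_self=True).toarray())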
| bsd-3-clause |
harisbal/pandas | pandas/tests/test_downstream.py | 4 | 3443 | # -*- coding: utf-8 -*-
"""
Testing that we work in the downstream packages
"""
import subprocess
import sys
import pytest
import numpy as np # noqa
from pandas import DataFrame
from pandas.compat import PY36
from pandas.util import testing as tm
import importlib
def import_module(name):
# we *only* want to skip if the module is truly not available
# and NOT just an actual import error because of pandas changes
if PY36:
try:
return importlib.import_module(name)
except ModuleNotFoundError: # noqa
pytest.skip("skipping as {} not available".format(name))
else:
try:
return importlib.import_module(name)
except ImportError as e:
if "No module named" in str(e) and name in str(e):
pytest.skip("skipping as {} not available".format(name))
raise
@pytest.fixture
def df():
return DataFrame({'A': [1, 2, 3]})
def test_dask(df):
toolz = import_module('toolz') # noqa
dask = import_module('dask') # noqa
import dask.dataframe as dd
ddf = dd.from_pandas(df, npartitions=3)
assert ddf.A is not None
assert ddf.compute() is not None
def test_xarray(df):
xarray = import_module('xarray') # noqa
assert df.to_xarray() is not None
def test_oo_optimizable():
# GH 21071
subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"])
@tm.network
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_statsmodels():
statsmodels = import_module('statsmodels') # noqa
import statsmodels.api as sm
import statsmodels.formula.api as smf
df = sm.datasets.get_rdataset("Guerry", "HistData").data
smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=df).fit()
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_scikit_learn(df):
sklearn = import_module('sklearn') # noqa
from sklearn import svm, datasets
digits = datasets.load_digits()
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(digits.data[:-1], digits.target[:-1])
clf.predict(digits.data[-1:])
# Cython import warning and traitlets
@tm.network
@pytest.mark.filterwarnings("ignore")
def test_seaborn():
seaborn = import_module('seaborn')
tips = seaborn.load_dataset("tips")
seaborn.stripplot(x="day", y="total_bill", data=tips)
def test_pandas_gbq(df):
pandas_gbq = import_module('pandas_gbq') # noqa
@pytest.mark.xfail(reason="0.7.0 pending", strict=True)
@tm.network
def test_pandas_datareader():
pandas_datareader = import_module('pandas_datareader') # noqa
pandas_datareader.DataReader(
'F', 'quandl', '2017-01-01', '2017-02-01')
# importing from pandas, Cython import warning
@pytest.mark.filterwarnings("ignore:The 'warn':DeprecationWarning")
@pytest.mark.filterwarnings("ignore:pandas.util:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
fp = geopandas.datasets.get_path('naturalearth_lowres')
assert geopandas.read_file(fp) is not None
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_pyarrow(df):
pyarrow = import_module('pyarrow') # noqa
table = pyarrow.Table.from_pandas(df)
result = table.to_pandas()
tm.assert_frame_equal(result, df)
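# Hedged usage note (not part of the original file): these downstream smoke
# tests are meant to be collected by pytest rather than imported directly, e.g.
#   pytest pandas/tests/test_downstream.py -k "dask or xarray"
# import_module() above skips a test when the optional dependency is missing.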
| bsd-3-clause |
jeromekelleher/discsim | validator.py | 1 | 9194 | """
Module used to validate the results of the simulations using various
means. These are not quite tests, since we don't have exact values
to check against, and everything is necessarily approximate.
"""
from __future__ import print_function
from __future__ import division
import sys
import time
import math
import numpy as np
import random
import multiprocessing
from matplotlib import ticker
from matplotlib import pyplot
import ercs
import discsim
import _discsim
class ErcsSingleLocusIdentitySimulator(ercs.Simulator):
"""
Class that calculates identity in state for genes separated by a range
of distances.
"""
def setup(self, num_points, max_distance, mutation_rate, accuracy_goal):
"""
Sets up the simulation so that we calculate identity at the specified
number of points, the maximum distance between points is
        max_distance and mutation happens at the specified rate. Also
        sets the max_time attribute to reflect the specified accuracy_goal.
"""
self.mutation_rate = mutation_rate
self.distances = np.linspace(0, max_distance, num_points)
self.sample = [None, (0, 0)] + [(0, x) for x in self.distances]
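        # Truncation rationale (descriptive comment added, not in the original):
        # each pair's contribution to identity decays like
        # exp(-2 * mutation_rate * t), so stopping the simulation at
        # max_time = log(accuracy_goal) / (-2 * mutation_rate) bounds the
        # neglected identity by roughly accuracy_goal.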
self.max_time = math.log(accuracy_goal) / (-2 * mutation_rate)
def get_identity(self, seed):
"""
Returns the probability of identity at all distance classes
in this replicate.
"""
pi, tau = self.run(seed)
mc = ercs.MRCACalculator(pi[0])
n = len(self.distances)
F = [0.0 for j in range(n)]
for j in range(n):
mrca = mc.get_mrca(1, j + 2)
if mrca != 0:
F[j] = math.exp(-2 * self.mutation_rate * tau[0][mrca])
return F
class SingleLocusIdentitySimulator(discsim.Simulator):
"""
Class that calculates identity in state for genes separated by a range
of distances.
"""
def __init__(self, torus_diameter, distances, mutation_rate, accuracy_goal):
super(SingleLocusIdentitySimulator, self).__init__(torus_diameter)
self.__accuracy_goal = accuracy_goal
self.__mutation_rate = mutation_rate
self.__distances = distances
self.__max_time = math.log(accuracy_goal) / (-2 * mutation_rate)
self.sample = [None, (0, 0)] + [(0, x) for x in self.__distances]
def get_identity(self, seed):
"""
Returns the probability of identity at all distance classes
in this replicate.
"""
self.random_seed = seed
self.run(self.__max_time)
pi, tau = self.get_history()
# reset the simulation so we can get another replicate.
self.reset()
mc = ercs.MRCACalculator(pi[0])
n = len(self.__distances)
F = [0.0 for j in range(n)]
for j in range(n):
mrca = mc.get_mrca(1, j + 2)
if mrca != 0:
F[j] = math.exp(-2 * self.__mutation_rate * tau[0][mrca])
return F
def subprocess_identity_worker(t):
sim, seed = t
return sim.get_identity(seed)
def run_identity_replicates(sim, num_replicates, worker_pool):
args = [(sim, random.randint(1, 2**31)) for j in range(num_replicates)]
replicates = worker_pool.map(subprocess_identity_worker, args)
mean_identity = np.mean(np.array(replicates), axis=0)
return mean_identity
def simple_identity_check(r=1, u=0.125, rate=1, num_parents=1,
num_replicates=10000, mutation_rate=1e-6):
"""
Checks identity using very simple model parameters.
"""
events = [ercs.DiscEventClass(r=r, u=u, rate=rate)]
ll_events = [e.get_low_level_representation() for e in events]
torus_diameter = 100
s = _discsim.IdentitySolver(ll_events,
torus_diameter=torus_diameter,
num_quadrature_points=512,
integration_abserr=1e-6,
integration_relerr=0,
integration_workspace_size=1000,
max_x=50, mutation_rate=mutation_rate,
num_parents=num_parents)
s.solve()
# Set up the simulations
num_points = 10
distances = np.linspace(0, 10, num_points)
sim = SingleLocusIdentitySimulator(torus_diameter, distances,
mutation_rate, 1e-6)
sim.num_parents = num_parents
sim.event_classes = events
workers = multiprocessing.Pool(processes=multiprocessing.cpu_count())
F_sim = run_identity_replicates(sim, num_replicates, workers)
F_num = [s.interpolate(x) for x in distances]
for x, fs, fn in zip(distances, F_sim, F_num):
print("{0:.1f}\t{1:.6f}\t{2:.6f}".format(x, fs, fn))
pyplot.plot(distances, F_sim, label="Simulation")
pyplot.plot(distances, F_num, label="Numerical")
pyplot.legend()
pyplot.show()
def mixed_events_identity_check(num_replicates):
torus_diameter = 100
num_points = 50
max_x = 20
mutation_rate = 1e-6
accuracy_goal = 1e-3
small_events = ercs.DiscEventClass(rate=1.0, r=1, u=0.5)
large_events = ercs.DiscEventClass(rate=0.1, r=10, u=0.05)
sim = ErcsSingleLocusIdentitySimulator(torus_diameter)
sim.setup(num_points, max_x, mutation_rate, accuracy_goal)
workers = multiprocessing.Pool(processes=multiprocessing.cpu_count())
l = [small_events, large_events]
sim.event_classes = l
before = time.time()
ercs_F = run_identity_replicates(sim, num_replicates, workers)
duration = time.time() - before
print("ercs done...", duration)
distances = np.linspace(0, max_x, num_points)
sim = SingleLocusIdentitySimulator(torus_diameter, distances,
mutation_rate, 1e-6)
sim.event_classes = l
before = time.time()
discsim_F = run_identity_replicates(sim, num_replicates, workers)
duration = time.time() - before
print("discsim done...", duration)
pyplot.plot(distances, ercs_F, label="ercs")
pyplot.plot(distances, discsim_F, label="discsim")
pyplot.legend()
pyplot.show()
def get_mean_squared_displacement(z, pop):
"""
Returns the mean squared displacement of the specified population from
the specified point.
"""
d2 = 0.0
for p, a in pop:
d2 += (p[0] - z[0])**2
d2 += (p[1] - z[1])**2
n = len(pop)
return d2 / (n * 2)
def single_locus_diffusion(u, r, rate):
"""
Measure the mean squared displacement of lineages for a single
locus simulation.
"""
z = (100, 100)
sample_size = 10000
s = 2.25
L = 100 * s
sim = discsim.Simulator(L)
sim.pixel_size = s
sim.sample = [None] + [z for j in range(sample_size)]
sim.event_classes = [ercs.DiscEventClass(r=r, u=u, rate=rate)]
sim.max_occupancy = 2 * sample_size
sim.max_population_size = 2 * sample_size
sim.print_state()
T = []
X = []
D = []
S = []
for j in range(100):
t = j * 100 * L**2
sim.run(t)
pop = sim.get_population()
msd = get_mean_squared_displacement(z, pop)
t = sim.get_time() / L**2
T.append(t)
X.append(msd)
S.append(t * (r**4) * rate * u * math.pi / 2)
print(T[-1], X[-1], S[-1])
pyplot.plot(T, X, T, S)
pyplot.show()
def subprocess_wave_worker(args):
sim, times, seed = args
sim.random_seed = seed
L = int(sim.torus_diameter)
n = np.zeros((len(times), L))
for j, t in enumerate(times):
sim.run(t)
pop = sim.get_population()
for tup in pop:
if sim.simulate_pedigree:
k = int(tup)
else:
k = int(tup[0])
n[j, k] += 1
n[j, k + 1] += 1
sim.reset()
return n
def run_wave_replicates(sim, times, num_replicates, worker_pool=None):
args = [(sim, times, random.randint(1, 2**31)) for j in range(num_replicates)]
if worker_pool is None:
replicates = [subprocess_wave_worker(a) for a in args]
else:
replicates = worker_pool.map(subprocess_wave_worker, args)
mean_n = []
for j in range(len(times)):
n = []
for r in replicates:
n.append(r[j])
mean_n.append(np.mean(n, axis=0))
return mean_n
def wave_1d(u, num_loci=0):
"""
Simulates the wave of pedigree ancestors in 1D.
"""
N = int(2 / u)
L = 100
s = discsim.Simulator(L, num_loci==0)
if num_loci != 0:
s.num_loci = num_loci
s.max_population_size = 10000
s.event_classes = [ercs.DiscEventClass(r=1, u=u)]
s.sample = [None, L/2, L/2]
workers = multiprocessing.Pool(processes=multiprocessing.cpu_count())
#workers = None
t = [j * 500 * L for j in range(5)]
x = [j for j in range(L)]
for n in run_wave_replicates(s, t, 100, workers):
pyplot.plot(x, n)
pyplot.axhline(0.797 * N)
pyplot.show()
def main():
#simple_identity_check(rate=0.5)
#simple_identity_check(r=0.93, u=0.133, rate=0.5, num_parents=2,
# num_replicates=10**6, mutation_rate=1e-7)
#mixed_events_identity_check(100000)
#plot_mixed_events_identity()
#single_locus_diffusion(u=0.0000125, r=1, rate=1.0)
wave_1d(u=0.005)
#wave_1d(u=0.005, num_loci=100000)
if __name__ == "__main__":
main()
| gpl-3.0 |
kambysese/mne-python | mne/decoding/tests/test_ssd.py | 8 | 12817 | # Author: Denis A. Engemann <[email protected]>
# Victoria Peterson <[email protected]>
# License: BSD (3-clause)
import numpy as np
import pytest
from numpy.testing import (assert_array_almost_equal, assert_array_equal)
from mne import io
from mne.time_frequency import psd_array_welch
from mne.decoding.ssd import SSD
from mne.utils import requires_sklearn
from mne.filter import filter_data
from mne import create_info
from mne.decoding import CSP
freqs_sig = 9, 12
freqs_noise = 8, 13
def simulate_data(freqs_sig=[9, 12], n_trials=100, n_channels=20,
n_samples=500, samples_per_second=250,
n_components=5, SNR=0.05, random_state=42):
"""Simulate data according to an instantaneous mixin model.
Data are simulated in the statistical source space, where n=n_components
sources contain the peak of interest.
"""
rng = np.random.RandomState(random_state)
filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
l_trans_bandwidth=1, h_trans_bandwidth=1,
fir_design='firwin')
    # generate an orthogonal mixing matrix
mixing_mat = np.linalg.svd(rng.randn(n_channels, n_channels))[0]
# define sources
S_s = rng.randn(n_trials * n_samples, n_components)
# filter source in the specific freq. band of interest
S_s = filter_data(S_s.T, samples_per_second, **filt_params_signal).T
S_n = rng.randn(n_trials * n_samples, n_channels - n_components)
S = np.hstack((S_s, S_n))
# mix data
X_s = np.dot(mixing_mat[:, :n_components], S_s.T).T
X_n = np.dot(mixing_mat[:, n_components:], S_n.T).T
# add noise
X_s = X_s / np.linalg.norm(X_s, 'fro')
X_n = X_n / np.linalg.norm(X_n, 'fro')
X = SNR * X_s + (1 - SNR) * X_n
X = X.T
S = S.T
return X, mixing_mat, S
@pytest.mark.slowtest
def test_ssd():
"""Test Common Spatial Patterns algorithm on raw data."""
X, A, S = simulate_data()
sf = 250
n_channels = X.shape[0]
info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg')
n_components_true = 5
# Init
filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
l_trans_bandwidth=1, h_trans_bandwidth=1)
filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
l_trans_bandwidth=1, h_trans_bandwidth=1)
ssd = SSD(info, filt_params_signal, filt_params_noise)
# freq no int
freq = 'foo'
filt_params_signal = dict(l_freq=freq, h_freq=freqs_sig[1],
l_trans_bandwidth=1, h_trans_bandwidth=1)
filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
l_trans_bandwidth=1, h_trans_bandwidth=1)
with pytest.raises(TypeError, match='must be an instance '):
ssd = SSD(info, filt_params_signal, filt_params_noise)
# Wrongly specified noise band
freq = 2
filt_params_signal = dict(l_freq=freq, h_freq=freqs_sig[1],
l_trans_bandwidth=1, h_trans_bandwidth=1)
filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
l_trans_bandwidth=1, h_trans_bandwidth=1)
with pytest.raises(ValueError, match='Wrongly specified '):
ssd = SSD(info, filt_params_signal, filt_params_noise)
# filt param no dict
filt_params_signal = freqs_sig
filt_params_noise = freqs_noise
with pytest.raises(ValueError, match='must be defined'):
ssd = SSD(info, filt_params_signal, filt_params_noise)
# Data type
filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
l_trans_bandwidth=1, h_trans_bandwidth=1)
filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
l_trans_bandwidth=1, h_trans_bandwidth=1)
ssd = SSD(info, filt_params_signal, filt_params_noise)
raw = io.RawArray(X, info)
pytest.raises(TypeError, ssd.fit, raw)
# check non-boolean return_filtered
with pytest.raises(ValueError, match='return_filtered'):
ssd = SSD(info, filt_params_signal, filt_params_noise,
return_filtered=0)
# check non-boolean sort_by_spectral_ratio
with pytest.raises(ValueError, match='sort_by_spectral_ratio'):
ssd = SSD(info, filt_params_signal, filt_params_noise,
sort_by_spectral_ratio=0)
# More than 1 channel type
ch_types = np.reshape([['mag'] * 10, ['eeg'] * 10], n_channels)
info_2 = create_info(ch_names=n_channels, sfreq=sf, ch_types=ch_types)
with pytest.raises(ValueError, match='At this point SSD'):
ssd = SSD(info_2, filt_params_signal, filt_params_noise)
# Number of channels
info_3 = create_info(ch_names=n_channels + 1, sfreq=sf, ch_types='eeg')
ssd = SSD(info_3, filt_params_signal, filt_params_noise)
pytest.raises(ValueError, ssd.fit, X)
# Fit
n_components = 10
ssd = SSD(info, filt_params_signal, filt_params_noise,
n_components=n_components)
# Call transform before fit
pytest.raises(AttributeError, ssd.transform, X)
# Check outputs
ssd.fit(X)
assert (ssd.filters_.shape == (n_channels, n_channels))
assert (ssd.patterns_.shape == (n_channels, n_channels))
# Transform
X_ssd = ssd.fit_transform(X)
assert (X_ssd.shape[0] == n_components)
# back and forward
ssd = SSD(info, filt_params_signal, filt_params_noise,
n_components=None, sort_by_spectral_ratio=False)
ssd.fit(X)
X_denoised = ssd.apply(X)
assert_array_almost_equal(X_denoised, X)
# denoised by low-rank-factorization
ssd = SSD(info, filt_params_signal, filt_params_noise,
n_components=n_components, sort_by_spectral_ratio=True)
ssd.fit(X)
X_denoised = ssd.apply(X)
assert (np.linalg.matrix_rank(X_denoised) == n_components)
# Power ratio ordering
ssd = SSD(info, filt_params_signal, filt_params_noise,
n_components=None, sort_by_spectral_ratio=False)
ssd.fit(X)
spec_ratio, sorter_spec = ssd.get_spectral_ratio(ssd.transform(X))
    # since we know that the number of true components is 5, the relative
    # difference should be low for the first 5 components and then increase
index_diff = np.argmax(-np.diff(spec_ratio))
assert index_diff == n_components_true - 1
# Check detected peaks
# fit ssd
n_components = n_components_true
filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
l_trans_bandwidth=1, h_trans_bandwidth=1)
filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
l_trans_bandwidth=1, h_trans_bandwidth=1)
ssd = SSD(info, filt_params_signal, filt_params_noise,
n_components=n_components, sort_by_spectral_ratio=False)
ssd.fit(X)
out = ssd.transform(X)
psd_out, _ = psd_array_welch(out[0], sfreq=250, n_fft=250)
psd_S, _ = psd_array_welch(S[0], sfreq=250, n_fft=250)
corr = np.abs(np.corrcoef((psd_out, psd_S))[0, 1])
assert np.abs(corr) > 0.95
# Check pattern estimation
# Since there is no exact ordering of the recovered patterns
# a pair-wise greedy search will be done
error = list()
for ii in range(n_channels):
corr = np.abs(np.corrcoef(ssd.patterns_[ii, :].T, A[:, 0])[0, 1])
error.append(1 - corr)
min_err = np.min(error)
assert min_err < 0.3 # threshold taken from SSD original paper
def test_ssd_epoched_data():
"""Test Common Spatial Patterns algorithm on epoched data.
Compare the outputs when raw data is used.
"""
X, A, S = simulate_data(n_trials=100, n_channels=20, n_samples=500)
sf = 250
n_channels = X.shape[0]
info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg')
n_components_true = 5
# Build epochs as sliding windows over the continuous raw file
# Epoch length is 1 second
X_e = np.reshape(X, (100, 20, 500))
# Fit
filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
l_trans_bandwidth=4, h_trans_bandwidth=4)
filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
l_trans_bandwidth=4, h_trans_bandwidth=4)
# ssd on epochs
ssd_e = SSD(info, filt_params_signal, filt_params_noise)
ssd_e.fit(X_e)
# ssd on raw
ssd = SSD(info, filt_params_signal, filt_params_noise)
ssd.fit(X)
    # Check if the first 5 components are the same for both
_, sorter_spec_e = ssd_e.get_spectral_ratio(ssd_e.transform(X_e))
_, sorter_spec = ssd.get_spectral_ratio(ssd.transform(X))
assert_array_equal(sorter_spec_e[:n_components_true],
sorter_spec[:n_components_true])
@requires_sklearn
def test_ssd_pipeline():
"""Test if SSD works in a pipeline."""
from sklearn.pipeline import Pipeline
sf = 250
X, A, S = simulate_data(n_trials=100, n_channels=20, n_samples=500)
X_e = np.reshape(X, (100, 20, 500))
    # define binary random output
y = np.random.randint(2, size=100)
info = create_info(ch_names=20, sfreq=sf, ch_types='eeg')
filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
l_trans_bandwidth=4, h_trans_bandwidth=4)
filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
l_trans_bandwidth=4, h_trans_bandwidth=4)
ssd = SSD(info, filt_params_signal, filt_params_noise)
csp = CSP()
pipe = Pipeline([('SSD', ssd), ('CSP', csp)])
pipe.set_params(SSD__n_components=5)
pipe.set_params(CSP__n_components=2)
out = pipe.fit_transform(X_e, y)
assert (out.shape == (100, 2))
assert (pipe.get_params()['SSD__n_components'] == 5)
def test_sorting():
"""Test sorting learning during training."""
X, _, _ = simulate_data(n_trials=100, n_channels=20, n_samples=500)
# Epoch length is 1 second
X = np.reshape(X, (100, 20, 500))
# split data
Xtr, Xte = X[:80], X[80:]
sf = 250
n_channels = Xtr.shape[1]
info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg')
filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
l_trans_bandwidth=4, h_trans_bandwidth=4)
filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
l_trans_bandwidth=4, h_trans_bandwidth=4)
# check sort_by_spectral_ratio set to False
ssd = SSD(info, filt_params_signal, filt_params_noise,
n_components=None, sort_by_spectral_ratio=False)
ssd.fit(Xtr)
_, sorter_tr = ssd.get_spectral_ratio(ssd.transform(Xtr))
_, sorter_te = ssd.get_spectral_ratio(ssd.transform(Xte))
assert any(sorter_tr != sorter_te)
# check sort_by_spectral_ratio set to True
ssd = SSD(info, filt_params_signal, filt_params_noise,
n_components=None, sort_by_spectral_ratio=True)
ssd.fit(Xtr)
# check sorters
sorter_in = ssd.sorter_spec
ssd = SSD(info, filt_params_signal, filt_params_noise,
n_components=None, sort_by_spectral_ratio=False)
ssd.fit(Xtr)
_, sorter_out = ssd.get_spectral_ratio(ssd.transform(Xtr))
assert all(sorter_in == sorter_out)
def test_return_filtered():
"""Test return filtered option."""
# Check return_filtered
    # Simulate noisier data with a broader frequency band than the desired one
X, _, _ = simulate_data(SNR=0.9, freqs_sig=[4, 13])
sf = 250
n_channels = X.shape[0]
info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg')
filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
l_trans_bandwidth=1, h_trans_bandwidth=1)
filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
l_trans_bandwidth=1, h_trans_bandwidth=1)
# return filtered to true
ssd = SSD(info, filt_params_signal, filt_params_noise,
sort_by_spectral_ratio=False, return_filtered=True)
ssd.fit(X)
out = ssd.transform(X)
psd_out, freqs = psd_array_welch(out[0], sfreq=250, n_fft=250)
freqs_up = int(freqs[psd_out > 0.5][0]), int(freqs[psd_out > 0.5][-1])
assert (freqs_up == freqs_sig)
# return filtered to false
ssd = SSD(info, filt_params_signal, filt_params_noise,
sort_by_spectral_ratio=False, return_filtered=False)
ssd.fit(X)
out = ssd.transform(X)
psd_out, freqs = psd_array_welch(out[0], sfreq=250, n_fft=250)
freqs_up = int(freqs[psd_out > 0.5][0]), int(freqs[psd_out > 0.5][-1])
assert (freqs_up != freqs_sig)
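if __name__ == "__main__":
    # Hedged sketch (not part of the original test module): call the simulator
    # directly to inspect the mixing model used by the tests above.
    X, mixing_mat, S = simulate_data(n_trials=10, n_channels=8, n_samples=250)
    print(X.shape)           # (8, 2500): channels x (trials * samples)
    print(mixing_mat.shape)  # (8, 8): orthogonal mixing matrix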
| bsd-3-clause |
zorroblue/scikit-learn | sklearn/externals/joblib/__init__.py | 54 | 5087 | """Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
========================= ================================================
**User documentation:** http://pythonhosted.org/joblib
**Download packages:** http://pypi.python.org/pypi/joblib#downloads
**Source code:** http://github.com/joblib/joblib
**Report issues:** http://github.com/joblib/joblib/issues
========================= ================================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire a better logging mechanism to help track what
   has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.11'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
from .parallel import register_parallel_backend
from .parallel import parallel_backend
from .parallel import effective_n_jobs
__all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump',
'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs',
'register_parallel_backend', 'parallel_backend']
| bsd-3-clause |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/tests/arrays/boolean/test_comparison.py | 9 | 3103 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import BooleanArray
from pandas.tests.extension.base import BaseOpsUtil
@pytest.fixture
def data():
return pd.array(
[True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],
dtype="boolean",
)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# propagate NAs
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = pd.Series(data._data)
expected = op(expected, other)
expected = expected.astype("boolean")
# propagate NAs
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
def test_compare_scalar(self, data, all_compare_operators):
op_name = all_compare_operators
self._compare_other(data, op_name, True)
def test_compare_array(self, data, all_compare_operators):
op_name = all_compare_operators
other = pd.array([True] * len(data), dtype="boolean")
self._compare_other(data, op_name, other)
other = np.array([True] * len(data))
self._compare_other(data, op_name, other)
other = pd.Series([True] * len(data))
self._compare_other(data, op_name, other)
@pytest.mark.parametrize("other", [True, False, pd.NA])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None], dtype="boolean")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = None
tm.assert_extension_array_equal(
a, pd.array([True, False, None], dtype="boolean")
)
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
b = pd.array([True, False, None] * 3, dtype="boolean")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = None
tm.assert_extension_array_equal(
a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
)
tm.assert_extension_array_equal(
b, pd.array([True, False, None] * 3, dtype="boolean")
)
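if __name__ == "__main__":
    # Hedged illustration (not part of the original test module): comparisons
    # on the nullable boolean dtype propagate pd.NA instead of raising.
    a = pd.array([True, False, None], dtype="boolean")
    print(a == True)  # -> [True, False, <NA>]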
| gpl-2.0 |
michigraber/neuralyzer | neuralyzer/cia/smff/model.py | 1 | 15120 | '''
A structured approach to calcium signal extraction, demixing and ROI estimation
from calcium imaging data, following the model and algorithm described by
Pnevmatikakis et al. in [1].
Author : Michael H. Graber <[email protected]>
License : MIT License
References
----------
[1] Pnevmatikakis et al. (2014) A structured matrix factorization framework
for large scale calcium imaging data analysis. arXiv preprint
arXiv:1409.2903. http://arxiv.org/abs/1409.2903
'''
from __future__ import print_function
import numpy as np
from scipy import sparse
from sklearn.decomposition.nmf import _nls_subproblem
from neuralyzer.cia import nmf, foopsi
from . import noise
from neuralyzer import log
logger = log.get_logger()
try:
from sklearn.externals.joblib import Parallel, delayed
N_JOBS = -1
JOBLIB_TMP_FOLDER = '/tmp'
except:
print('joblib could not be imported. NO PARALLEL JOB EXECUTION!')
N_JOBS = None
TINY_POSITIVE_NUMBER = np.finfo(np.float).tiny
# STRUCTURED NON-NEGATIVE MATRIX FACTORIZATION
# -----------------------------------------------------------------------------
class SMFF(object):
''' Non-negative matrix factorization for calcium imaging data.
A calcium imaging dataset Y, [Y] = d x T, is factorized according to
Y = AC + bf.T
with [A] = d x k, [C] = k x T, [b] = d x 1, [f] = T x 1
A being the spatial components, C the calcium signal, b the background
component and f the background signal.
Model Parameters / Keyword Arguments
------------------------------------
    temporal_update_method : ('foopsi' (default) | 'projgrad' | 'multiplicative')
The method/algorithm used to update the temporal components.
foopsi includes a model-based estimation of spike / event times.
    noise_range : (default=(0.25, 0.5))
Spectral range for the estimation of the signal noise in units of the
temporal sampling rate.
njobs : (default=-1)
If joblib is installed computation can be distributed to multiple
processors. -1 distributes to all processors.
iterations : (default=3)
Number of block coordinate descent iterations for entire model.
filt_it : (default=0)
Execute spatial filtering on the spatial components every filt_it
iteration. 0 (or any smaller number) leads to no filtering.
Depending on your temporal_update_method of choice you can additionally set
the following parameters:
'projgrad'
``````````
tolH : (default=1e-4)
maxiter : (default=2000)
'foopsi'
````````
foopsi_autoreg_p : (default=3)
Order of the autoregressive model for the calcium signal.
foopsi_bcd_its : (default=5)
Block coordinate descent iterations for the foopsi temporal component
update.
References
----------
[1] Pnevmatikakis et al. (2014) A structured matrix factorization framework
for large scale calcium imaging data analysis. arXiv preprint
arXiv:1409.2903. http://arxiv.org/abs/1409.2903
'''
def __init__(self, **kwargs):
self.logger = kwargs.pop('logger', logger)
self._model_init = {}
self._step = 0
self.avg_abs_res_ = []
# SET UP THE PARAMETERS
# default model parameters
self.params = {
'temporal_update_method' : 'foopsi',
'noise_range' : (0.25, 0.5), # in fs units
'njobs' : -1,
'iterations': 3,
'filt_it': 0,
}
self.params.update(**kwargs)
# conditional parameters
if self.params['temporal_update_method'] == 'foopsi':
self.params.update({
'foopsi_bcd_its' : kwargs.get('foopsi_bcd_its', 5),
'foopsi_autoreg_p' : kwargs.get('foopsi_autoreg_p', 3),
})
elif self.params['temporal_update_method'] == 'projgrad':
self.params.update({
'projgrad_tolH' : kwargs.get('projgrad_tolH', 1e-4),
'projgrad_maxiter' : kwargs.get('maxiter', 2000),
})
paramstring = ', '.join("{!s}={!r}".format(k,v) for (k,v) in self.params.items())
self.logger.info('SMFF({0})'.format(paramstring))
def init_model(self, *args, **kwargs):
''' Initialize the model values. '''
# check for initialization parameters
for ini in ('A', 'C', 'f', 'b'):
self._model_init[ini] = kwargs.pop(ini, None)
if not any([v is None for v in self._model_init.values()]):
self.logger.info('Model entirely initialized with kwargs.')
return
if kwargs.get('random', False):
k = kwargs.get('k', None)
d = kwargs.get('d', None)
T = kwargs.get('T', None)
for k, v in self._model_init.items():
if v is None:
self._model_init[k] = {
'C' : np.random.rand(k, T),
'A' : np.random.rand(d, k),
'b' : np.random.rand(d, 1),
'f' : np.random.rand(1, T),
}[k]
else:
from . import greedy_init
self.params['greedy_init_params'] = kwargs
A, C, b, f = greedy_init.greedy(*args, **kwargs)
self._model_init['C'] = C
self._model_init['A'] = A
self._model_init['b'] = b
self._model_init['f'] = f
def fit(self, Y, copy_init=True):
''' Fit the data to the model with the specified parameters. '''
self.logger.info('Fitting SMFF model to data Y. [Y] = (%s, %s)' % Y.shape)
self._tap_model_init(copy=copy_init)
mean_residual = np.abs(self.calculate_residual(Y)).mean()
self.logger.info('avg absolute residual = %s ' % mean_residual)
self.avg_abs_res_.append(mean_residual)
while not self._stop():
self.logger.info('iteration %s / %s ' % \
(self._step+1, self.params['iterations']))
self._do_bcd_step(Y, **self.params)
def _tap_model_init(self, copy=True):
''' Copy or refer model coefficients to _model_init '''
if copy:
self.logger.info('Copying initial model values ..')
self.C_ = self._model_init['C'].copy()
self.A_ = self._model_init['A'].copy()
self.b_ = self._model_init['b'].copy()
self.f_ = self._model_init['f'].copy()
else:
self.C_ = self._model_init['C']
self.A_ = self._model_init['A']
self.b_ = self._model_init['b']
self.f_ = self._model_init['f']
def _stop(self):
''' Simple interation number based stop criterion for now. '''
return self._step >= self.params['iterations']
def _do_bcd_step(self, Y, **params):
''' Executes a single block gradient descent iteration step on the
whole model.
Model parameters can be overwritten using kwargs here.
'''
params.update(self.params)
# we need to compute the pixelwise noise only once
if not hasattr(self, 'pixel_noise_'):
self.logger.info('calculating noise level for all pixels.')
self.pixel_noise_ = noise.sigma_noise_spd_welch(Y, 1., params['noise_range'])
# UPDATE A, b ---------------------------------------------------------
self.A_, self.b_ = SMFF.update_A_b(
self.C_, self.A_, self.b_, self.f_, Y, self.pixel_noise_,
njobs=params['njobs'], logger=self.logger
)
# throw away components containing nan
remove_mask = np.isnan(self.A_).any(axis=0)
self.A_ = self.A_[:, ~remove_mask]
self.C_ = self.C_[~remove_mask]
# ROI component post processing
if not ( np.mod(self._step+1, params['filt_it']) or params['filt_it'] < 1 ):
self.logger.info('filter spatial components.')
self.A_ = filter_spatial_components(self.A_, disk_size=2)
# TODO : threshold spatial components ??
#if params.get('threshold_A', False):
#pass
# UPDATE C, f ---------------------------------------------------------
self.C_, self.f_, self.S_, self.G_ = SMFF.update_C_f(
self.C_, self.A_, self.b_, self.f_, Y,
logger=self.logger, **params)
# drop inactive components
if self.S_ is not None:
remove_mask = ~self.S_.any(axis=1)
if any(remove_mask):
self.logger.info('Removing inactive components {0}'.format(
np.where(remove_mask)[0]))
self.A_ = self.A_[:, ~remove_mask]
self.C_ = self.C_[~remove_mask]
# ROI Merging ---------------------------------------------------------
# TODO
# RESIDUAL CALCULATION ------------------------------------------------
mean_residual = np.abs(self.calculate_residual(Y)).mean()
self.logger.info('avg absolute residual = %s ' % mean_residual)
self.avg_abs_res_.append(mean_residual)
self._step += 1
@staticmethod
def update_C_f(C, A, b, f, Y, **kwargs):
''' Update the temporal components C and f. '''
logger = kwargs.pop('logger', None)
params = kwargs
if logger:
logger.info('Updating C and f with method "{0}".'.format(
params['temporal_update_method']))
# adding the background data to the matrices
W = np.hstack((A, b))
H = np.vstack((C, f))
if params['temporal_update_method'] == 'foopsi':
N, T = H.shape
# projection of the residual onto the spatial components
resYA = np.dot((Y - np.dot(W, H)).T, W)
H_ = np.zeros((N, T))
S_ = np.zeros((N-1, T))
G_ = np.zeros((N-1, params['foopsi_autoreg_p']))
# foopsi block coordinate descent iterations
for bcd_it in range(params['foopsi_bcd_its']):
# randomly permute component indices
for ii in np.random.permutation(range(N)):
# all regular components
if ii < N-1:
resYA[:,ii] = resYA[:,ii] + H[ii]
c_, spks_, b_, sn_, g_ = foopsi.cvx_foopsi(resYA[:, ii],
noise_range=params['noise_range'],
p=params['foopsi_autoreg_p'])
H_[ii, :] = (c_ + b_).squeeze()
resYA[:,ii] = resYA[:,ii] - H_[ii, :]
S_[ii, :] = spks_.squeeze()
G_[ii, :] = g_[1:].squeeze()
# the background
else:
resYA[:,ii] = resYA[:,ii] + H[ii]
H_[ii, :] = resYA[:, ii].clip(0, np.inf)
resYA[:,ii] = resYA[:,ii] - H_[ii, :]
C = H_[:N-1,:]
f = H_[N-1,:]
f = f[:, np.newaxis].T
elif params['temporal_update_method'] == 'projgrad':
# using the scikit-learn project gradient _nls_subproblem update
H_, grad, n_iter = _nls_subproblem(Y, W, H,
params['projgrad_tolH'], params['projgrad_maxiter'])
# rearrangement of output
C = H_[:-1, :]
f = H_[-1, :]
f = f[:, np.newaxis].T
S_, G_ = None, None
elif params['temporal_update_method'] == 'multiplicative':
# since this method is based on multiplication and division we need
# to ensure that we will not get infinity errors
W = W.clip(TINY_POSITIVE_NUMBER, np.inf)
H = H.clip(TINY_POSITIVE_NUMBER, np.inf)
# we call the multiplicative update from our Morup NMF implementation
W_ = nmf.NMF_L0.update_W(Y.T, W.T, H.T)
# rearrangement of output
C = W_[:, :-1].T
f = W_[:, -1].T
f = f[:, np.newaxis].T
S_, G_ = None, None
else:
raise ValueError('temporal_update_method %s not available.' % \
params['temporal_update_method'])
return C, f, S_, G_
@staticmethod
def update_A_b(C, A, b, f, Y, pixel_noise, **kwargs):
''' Update the spatial components A and the background b. '''
logger = kwargs.get('logger', None)
if logger: logger.info('Updating A and b')
d, T = Y.shape
H = np.vstack((C, f))
njobs = kwargs.get('njobs', N_JOBS)
if njobs is None:
A_ = []
for pidx, sn in enumerate(pixel_noise):
A_.append(nmf.do_lars_fit(H.T, Y[pidx], alpha=sn*np.sqrt(T)))
elif type(njobs) == int:
sqrtT = np.sqrt(T)
A_ = Parallel(n_jobs=njobs, max_nbytes=1e6)(
delayed(nmf.do_lars_fit)(
H.T, Y[pidx], alpha=pixel_noise[pidx]*sqrtT
)
for pidx in range(len(pixel_noise)))
else:
raise ValueError('njobs of improper type. Can only be an int or None.')
A_ = np.array(A_)
A = A_[:,:-1]
A /= np.linalg.norm(A, axis=0)[np.newaxis, :]
b = np.dot((Y - np.dot(A, C)), f.T/norm(f))
b /= np.linalg.norm(b)
return A, b
def Y_hat(self):
''' The estimated data using the current parameters on the model. '''
return np.dot(self.A_, self.C_) + np.dot(self.b_, self.f_)
def calculate_residual(self, Y):
''' '''
return Y - self.Y_hat()
# UTILITIES
# -----------------------------------------------------------------------------
def norm(x):
    ''' Euclidean norm for a 1d vector '''
return np.sqrt(np.dot(x.squeeze(),x.squeeze()))
def _morph_image_components(H):
m, k = H.shape
w = int(np.sqrt(m))
imshape = (w,w)
for i in range(k):
H[:, i] = morph_close_component(H[:,i], imshape)
return H
def morph_close_component(a, imshape):
from skimage import morphology
amorph = morphology.closing(a.reshape(imshape[0], imshape[1]))
return amorph.flatten()
def morph_erode_component(a, imshape, disk_size=1):
from skimage import morphology
amorph = morphology.erosion(
a.reshape(imshape[0], imshape[1]),
morphology.disk(disk_size))
return amorph.flatten()
def filter_spatial_components(H, filter_method='erosion', **kwargs):
''' '''
w = int(np.sqrt(H.shape[0])) # nastily assuming square images here
imshape = (w,w)
if filter_method == 'erosion':
for i, imflat in enumerate(H.T):
H[:, i] = morph_erode_component(imflat, imshape, **kwargs)
elif filter_method == 'closing':
for i, imflat in enumerate(H.T):
            H[:, i] = morph_close_component(imflat, imshape)
H /= np.linalg.norm(H) # normalize
return H
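if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module). Shapes, parameter
    # values and the random initialisation below are illustrative assumptions,
    # not recommendations from the authors.
    d, T, k = 16 * 16, 200, 5
    Y = np.abs(np.random.rand(d, T))                 # fake non-negative "movie"
    smff = SMFF(iterations=1, temporal_update_method='projgrad')
    smff.init_model(random=True, k=k, d=d, T=T)      # random A, C, b, f
    smff.fit(Y)
    print(smff.Y_hat().shape)                        # (d, T) reconstruction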
| mit |
atechnicolorskye/Stratospheric-UAV-Simulator | gfs_data_simulator_local.py | 1 | 100831 | """
gfs_data_simulator_local.py
GFS Local Data Simulator
DESCRIPTION
-----------
GFS Data Local Simulator Module:
This module simulates the descent of a UAV through multiple atmospheric layers. The properties of the layers are obtained from the National
Oceanic and Atmospheric Administration's (NOAA) Global Forecast System (GFS) using Niccolo' Zapponi's GFS and weather modules. The module
stores the initial Weather object to improve processing times by reducing the number of downloads. Using code from the base_simulator module,
displacement, heading and flight time are found layer-by-layer. The location and other relevant data of the UAV are updated every x seconds
of flight while it is above 100 m, after which updates are carried out every y seconds. The UAV is assumed to have been carried up by a
weather balloon, and there are no updrafts or downdrafts.
GLIDING FLIGHT PHYSICS
----------------------
When an aircraft is gliding, its lift-to-drag ratio is equivalent to its glide ratio (= forward velocity / sink rate) in steady wind conditions.
As the UAV assumes a predefined velocity in a direction predefined by a wind field, the velocity vector of the UAV can be taken as the forward
velocity component of the glide ratio, which gives a corresponding sink rate. In this simulator, we assume a 2D wind field extending through
the atmosphere. As in the base_simulator module, we also consider the effects of constant headwinds and tailwinds in this simulation: headwinds
and tailwinds respectively reduce and increase the forward velocity of the UAV. This changes the distance covered by the UAV, but not the time
taken for the UAV to reach the ground.
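For example, an L/D of 10 at a forward airspeed of 10 m/s implies a steady sink rate of 10 / 10 = 1 m/s; a 5 m/s tailwind then raises the
ground speed to 15 m/s while leaving that 1 m/s sink rate, and hence the descent time, unchanged (numbers are illustrative only).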
USAGE
-----
The code requires the user to create an instance of the Flight() object. The user can call the methods Fly_1, Fly_2, Fly_3 and
Fly_4. Fly_1, Fly_2 and Fly_3 serve to simulate the descent of the UAV while keeping to a certain heading, using either a distance-based
or a heading-based algorithm. Fly_4 demonstrates the UAV's capability to reach various user-determined waypoints using
the distance-based algorithm from the method Fly in gfs_data_simulator. Once one of the above methods has been called, the user can call
the method PlotContour, which plots relevant figures to visualise the descent process. Fly_Range allows the user to plot the descent
of the UAV for various set headings and is used with Fly_1, Fly_2 and Fly_3 specifically. Fly_Range_2 is used to plot the results from Fly_4 for
various pre-determined headings.
Example:
y = Flight(50.1, -5.00, 30000, (2014,03,01,19,31,01), 10, 2, 0.6, 0.4, 3, 0.5)
y.Fly_1(90) # Same for Fly_2 and Fly_3
y.PlotContour()
y = Flight(50.1, -5.00, 30000, (2014,03,01,19,31,01), 10, 2, 0.6, 0.4, 3, 0.5)
y.Fly_4(5000, 5000, 500, 45)
y = Flight(50.1, -5.00, 7500, (2014,03,05,19,31,01), 5, 2, 0.2, 0.15, 3, 0.5)
y.Fly_Range(3, [45,90], False, False, True, True, False)
y = Flight(50.1, -5.00, 5000, (2014,03,10,19,31,01), 5, 2, 0.2, 0.15, 3, 0.5)
y.Fly_Range_2(0, 359, 10000, 100000, 5000, 5)
University of Southampton
Si Kai Lee, [email protected]
"""
__author__ = "Si Kai Lee, University of Southampton, [email protected]"
from global_tools import m2deg, getUTCOffset
from datetime import datetime, timedelta
import weather
import numpy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from scipy.integrate import odeint
from write_file import WriteToFile_Dist, WriteToFile_Head1, WriteToFile_Head2, WriteToFile_Local, WriteToFile_Simulator
import logging
from sys import exit
class Flight(object):
"""
    Class that contains the simulation of UAV flight from a provided altitude.
    Instantiates the Flight object when called.
    Inputs: Starting Latitude, Starting Longitude, Starting Altitude, Starting Time, Average Lift-to-Drag Ratio,
    Mass of UAV, Planform Area (assuming the UAV is a flying wing), Average Coefficient of Lift, Time Step used
    for ODEs at altitudes above 100m, Time Step used for ODEs at altitudes below 100m.
"""
def __init__(self, Lat, Lon, Alt, StartTime, LD, Mass, PlanformArea, CL, TimeStep1, TimeStep2):
# Initialises flight container object
# Sets up logging configuration
logging.basicConfig(filename='Fly.log', format='%(levelname)s:%(message)s', level=logging.DEBUG)
# User defined variables
self.Lat = Lat
self.Lon = Lon
self.Alt = Alt
self._StartTime = StartTime
self.StartTime = datetime(*StartTime)
self.LD = LD
self.Mass = Mass
self.PlanformArea = PlanformArea
self.CL = CL
self.TimeStep1 = TimeStep1
        self.TimeStep2 = TimeStep2
# Creates empty lists to store x and y coordinates and defined heading for UAV for Fly_4 and Fly_Range_2
self.x_coords = []
self.y_coords = []
self.x_coords_max = []
self.y_coords_max = []
self.x_coords_end = []
self.y_coords_end = []
self.set_heading = []
self.end_heading = []
self.x_coords_fail = []
self.y_coords_fail = []
self.x_coords_end_fail = []
self.y_coords_end_fail = []
self.set_heading_fail = []
self.end_heading_fail = []
# Checks if StartTime is between 30 days before and 6 after current time
if self.StartTime > datetime.now():
if self.StartTime - datetime.now() > timedelta(days=6):
raise Exception('Your selected date and time is greater than 6 days in the future. Please try again.')
exit()
else:
if datetime.now() - self.StartTime > timedelta(days=30):
raise Exception('Your selected date and time is greater than 30 days in the past. Please try again.')
exit()
# Dependent variables
self._BaseLat = self.Lat
self._BaseLon = self.Lon
self._xyWindSpeed = 0
self._WindHead = 0
self._GlideAngle = numpy.arctan(float(1) / self.LD)
self._XZCurrentSpeed = 0
self._XZTempSpeed = 0
self._CurrentDensity = 0
self._CurrentLatLon = 0
self._CurrentLat = self.Lat
self._CurrentLon = self.Lon
self._CurrentAlt = self.Alt
self._CurrentDist = 0
self._CurrentTime = self.StartTime
self._TimeDump = [0]
self._AltDump = [self.Alt]
self._DistDump = [0]
self._DensityDump = []
self._XDirDump = [0]
self._YDirDump = [0]
self._HeadingDump = []
self._XWindDump = []
self._YWindDump = []
# Intialises Weather Environment object
self.WeatherData = [weather.forecastEnvironment()]
# Obtains data from GFS for current flight conditions
self.WeatherData[0].launchSiteLat = self.Lat
self.WeatherData[0].launchSiteLon = self.Lon
self.WeatherData[0].launchSiteElev = self.Alt
self.WeatherData[0].dateAndTime = self.StartTime
self.WeatherData[0].UTC_offset = getUTCOffset(self.Lat, self.Lon, self.StartTime)
# Downloads GFS weather data
print 'Downloading the forecast (might take a while)...'
self.WeatherData[0].loadForecast()
logging.info('GFS Data downloaded!')
print "Forecast downloaded!"
# Get relevant properties of current air mass
self._xyWindSpeed = self.WeatherData[0].getWindSpeed(self.Lat, self.Lon, self.Alt, self.StartTime)
logging.info('XY Wind Speed: %s', self._xyWindSpeed)
self._WindHead = self.WeatherData[0].getWindDirection(self.Lat, self.Lon, self.Alt, self.StartTime)
logging.info('Wind Heading: %s', self._WindHead)
self._CurrentDensity = self.WeatherData[0].getDensity(self.Lat, self.Lon, self.Alt, self.StartTime)
logging.info('Current Density: %s', self._CurrentDensity)
# Appends Density in data dumps
self._DensityDump.append(self._CurrentDensity)
def Clear(self):
# Resets object attributes
self.WeatherData = [self.WeatherData[0]]
# Dependent variables
self._BaseLat = self.Lat
self._BaseLon = self.Lon
self._xyWindSpeed = self.WeatherData[0].getWindSpeed(self.Lat, self.Lon, self.Alt, self.StartTime)
self._WindHead = self.WeatherData[0].getWindDirection(self.Lat, self.Lon, self.Alt, self.StartTime)
self._GlideAngle = numpy.arctan(float(1) / self.LD)
self._XZCurrentSpeed = 0
self._XZTempSpeed = 0
self._CurrentDensity = self.WeatherData[0].getDensity(self.Lat, self.Lon, self.Alt, self.StartTime)
self._CurrentLatLon = 0
self._CurrentLat = self.Lat
self._CurrentLon = self.Lon
self._CurrentAlt = self.Alt
self._CurrentDist = 0
self._CurrentTime = self.StartTime
self._TimeDump = [0]
self._AltDump = [self.Alt]
self._DistDump = [0]
self._DensityDump = [self._CurrentDensity]
self._XDirDump = [0]
self._YDirDump = [0]
self._HeadingDump = []
self._XWindDump = []
self._YWindDump = []
def Fly_1(self, FinalHead):
"""
        Simulates gliding UAV flight from a provided altitude; weather data is provided by GFS.
        Uses a distance-based algorithm to calculate the UAV heading.
        Inputs: Desired Heading
        Prints and Returns: End Latitude, End Longitude, Ideal End Latitude, Ideal End Longitude, End Heading, Desired Heading
"""
# Fly for TimeStep1 seconds and recalculates dependent variables
print 'Running Fly_1'
# Converts FinalHead from deg to rad
self.FinalHead = numpy.deg2rad(FinalHead)
logging.info('User Input:\nStart Latitude: %s\nStart Longtitude: %s\nStart Altitude: %s\nDesired Heading: %s\nStart Time: %s\nLift to Drag Ratio: %s\n \
Mass: %s\nWing Planform Area: %s\nCoefficient of Lift: %s', self.Lat, self.Lon, self.Alt, self.FinalHead, self.StartTime, self.LD,
self.Mass, self.PlanformArea, self.CL)
while self._CurrentAlt > 100:
# Fly for TimeStep1 seconds and recalculates dependent variables
# Calculates heading of UAV
self._BaseXDiff = self._DistDump[-1] * numpy.cos(self.FinalHead)
self._BaseYDiff = self._DistDump[-1] * numpy.sin(self.FinalHead)
self._XDiff, self._YDiff = self._XDirDump[-1], self._YDirDump[-1]
if (self._BaseXDiff - self._XDiff) == 0 and (self._BaseYDiff - self._YDiff) > 0:
self._Heading = numpy.pi / 2
elif (self._BaseXDiff - self._XDiff) < 0 and (self._BaseYDiff - self._YDiff) == 0:
self._Heading = numpy.pi
elif (self._BaseXDiff - self._XDiff) == 0 and (self._BaseYDiff - self._YDiff) < 0:
self._Heading = 3 * numpy.pi / 2
else:
self._Heading = numpy.arctan2(self._BaseYDiff - self._YDiff, self._BaseXDiff - self._XDiff)
self._HeadingDump.append(self._Heading)
# Provides list of time values for ODE solver
time = numpy.linspace(0, self.TimeStep1)
# Variables required for ODE solver
SinA = numpy.sin(self._GlideAngle)
CosA = numpy.cos(self._GlideAngle)
B = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass)
C = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass * self.LD)
# Sets up ODEs to be solved
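            # Descriptive comments added (not in the original): along the glide
            # path, z[0] is altitude and z[1] is airspeed V, so
            #   dz/dt = -V*sin(gamma),  dV/dt = g*sin(gamma) - B*V**2
            # with B = 0.5*rho*S*CL/m acting as the aerodynamic deceleration term.
            # Horizontally, xy[0] is along-track distance and xy[1] is the speed
            # used for that integration, with
            #   dx/dt = V*cos(gamma),   dV/dt = g*cos(gamma) - C*V**2
            # where C = B / (L/D).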
def z_derivatives(z, t):
return numpy.array([-z[1] * SinA, - B * (z[1] ** 2) + 9.81 * SinA])
def xy_derivatives(xy, t):
return numpy.array([xy[1] * CosA, - C * (xy[1] ** 2) + 9.81 * CosA])
# Solves for change in altitude
z_initial_conditions = numpy.array([self._CurrentAlt, self._XZCurrentSpeed])
z = odeint(z_derivatives, z_initial_conditions, time)
self._CurrentAlt, self._XZCurrentSpeed = z[-1]
# Solves for distance travelled
xy_initial_conditions = numpy.array([self._CurrentDist, self._XZTempSpeed])
xy = odeint(xy_derivatives, xy_initial_conditions, time)
self._CurrentDist, self._XZTempSpeed = xy[-1]
# Appends changes in X and Y directions to X and Y data dumps
if self._Heading == 0:
self._XDirDump.append(self._XDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == numpy.pi:
self._XDirDump.append(self._XDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == 3 * numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
else:
self._XDirDump.append(self._XDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.cos(self._Heading)) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.sin(self._Heading)) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
# Appends the wind conditions to wind data dumps
self._XWindDump.append(self._xyWindSpeed * numpy.cos(self._WindHead))
self._YWindDump.append(self._xyWindSpeed * numpy.sin(self._WindHead))
# Updates parameters used to obtain the next set of forecasts
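# m2deg converts the accumulated X/Y displacements into (latitude, longitude) offsets from the
# launch point, which are then added to the starting coordinates.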
self._CurrentLatLon = m2deg(self._YDirDump[-1], self._XDirDump[-1], self.Lat)
self._CurrentLat = self.Lat + self._CurrentLatLon[0]
self._CurrentLon = self.Lon + self._CurrentLatLon[1]
self._CurrentTime += timedelta(seconds=self.TimeStep1)
# Appends useful data to data dumps
self._TimeDump.append(self._TimeDump[-1] + self.TimeStep1)
self._AltDump.append(self._CurrentAlt)
self._DistDump.append(self._CurrentDist)
logging.info('Current Latitude: %s\nCurrent Longitude: %s\nCurrent Altitude: %s\nCurrent Time: %s', self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
# Check if region travelled exceeds 3 degrees Lat and 3 degrees Lon from the launch site
if abs(self._CurrentLat - self._BaseLat) < 1.5 and abs(self._CurrentLon - self._BaseLon) < 1.5:
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
else:
print 'UAV has travelled out of forecast region.'
print 'New forecast has to be obtained, please wait while forecast is downloaded and processed.'
self._BaseLat = self._CurrentLat
self._BaseLon = self._CurrentLon
# Initialises Weather Environment object
self.WeatherData.append(weather.forecastEnvironment())
# Obtains data from GFS for current flight conditions
self.WeatherData[-1].launchSiteLat = self._BaseLat
self.WeatherData[-1].launchSiteLon = self._BaseLon
self.WeatherData[-1].launchSiteElev = self._CurrentAlt
self.WeatherData[-1].dateAndTime = self._CurrentTime
self.WeatherData[-1].UTC_offset = getUTCOffset(self._BaseLat, self._BaseLon, self._CurrentTime)
# Downloads GFS weather data
self.WeatherData[-1].loadForecast()
logging.info('GFS Data downloaded!')
print "Forecast downloaded!"
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
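# Below 100 m the simulation switches to TimeStep2 with a denser (100-point) ODE time grid and
# continues until the altitude falls below 1 m.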
while self._CurrentAlt > 0:
# Fly for TimeStep2 seconds and recalculates dependent variables
self._BaseXDiff = self._DistDump[-1] * numpy.cos(self.FinalHead)
self._BaseYDiff = self._DistDump[-1] * numpy.sin(self.FinalHead)
self._XDiff, self._YDiff = self._XDirDump[-1], self._YDirDump[-1]
if (self._BaseXDiff - self._XDiff) == 0 and (self._BaseYDiff - self._YDiff) > 0:
self._Heading = numpy.pi / 2
elif (self._BaseXDiff - self._XDiff) < 0 and (self._BaseYDiff - self._YDiff) == 0:
self._Heading = numpy.pi
elif (self._BaseXDiff - self._XDiff) == 0 and (self._BaseYDiff - self._YDiff) < 0:
self._Heading = 3 * numpy.pi / 2
else:
self._Heading = numpy.arctan2(self._BaseYDiff - self._YDiff, self._BaseXDiff - self._XDiff)
self._HeadingDump.append(self._Heading)
# Provides list of time values for ODE solver
time = numpy.linspace(0, self.TimeStep2, num=100)
# Variables required for ODE solver
SinA = numpy.sin(self._GlideAngle)
CosA = numpy.cos(self._GlideAngle)
B = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass)
C = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass * self.LD)
# Sets up ODEs to be solved
def z_derivatives(z, t):
return numpy.array([-z[1] * SinA, - B * (z[1] ** 2) + 9.81 * SinA])
def xy_derivatives(xy, t):
return numpy.array([xy[1] * CosA, - C * (xy[1] ** 2) + 9.81 * CosA])
# Solves for change in altitude
z_initial_conditions = numpy.array([self._CurrentAlt, self._XZCurrentSpeed])
z = odeint(z_derivatives, z_initial_conditions, time)
self._CurrentAlt, self._XZCurrentSpeed = z[-1]
# Solves for distance travelled
xy_initial_conditions = numpy.array([self._CurrentDist, self._XZTempSpeed])
xy = odeint(xy_derivatives, xy_initial_conditions, time)
self._CurrentDist, self._XZTempSpeed = xy[-1]
# Appends changes in X and Y directions to X and Y data dumps
if self._Heading == 0:
self._XDirDump.append(self._XDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
elif self._Heading == numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
elif self._Heading == numpy.pi:
self._XDirDump.append(self._XDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
elif self._Heading == 3 * numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
else:
self._XDirDump.append(self._XDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.cos(self._Heading)) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.sin(self._Heading)) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
# Appends the wind conditions to wind data dumps
self._XWindDump.append(self._xyWindSpeed * numpy.cos(self._WindHead))
self._YWindDump.append(self._xyWindSpeed * numpy.sin(self._WindHead))
# Updates parameters used to obtain the next set of forecasts
self._CurrentLatLon = m2deg(self._YDirDump[-1], self._XDirDump[-1], self.Lat)
self._CurrentLat = self.Lat + self._CurrentLatLon[0]
self._CurrentLon = self.Lon + self._CurrentLatLon[1]
self._CurrentTime += timedelta(seconds=self.TimeStep2)
# Appends useful data to data dumps
self._TimeDump.append(self._TimeDump[-1] + self.TimeStep2)
self._AltDump.append(self._CurrentAlt)
self._DistDump.append(self._CurrentDist)
logging.info('Current Latitude: %s\nCurrent Longitude: %s\nCurrent Altitude: %s\nCurrent Time: %s', self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
# Check if region travelled exceeds 3 degrees Lat and 3 degrees Lon from the launch site
if abs(self._CurrentLat - self._BaseLat) < 1.5 and abs(self._CurrentLon - self._BaseLon) < 1.5:
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
# Checks if UAV is low enough for calculations to stop
if self._CurrentAlt < 1:
break
else:
print 'UAV has travelled out of forecast region.'
print 'New forecast has to be obtained, please wait while forecast is downloaded and processed.'
self._BaseLat = self._CurrentLat
self._BaseLon = self._CurrentLon
# Initialises Weather Environment object
self.WeatherData.append(weather.forecastEnvironment())
# Obtains data from GFS for current flight conditions
self.WeatherData[-1].launchSiteLat = self._BaseLat
self.WeatherData[-1].launchSiteLon = self._BaseLon
self.WeatherData[-1].launchSiteElev = self._CurrentAlt
self.WeatherData[-1].dateAndTime = self._CurrentTime
self.WeatherData[-1].UTC_offset = getUTCOffset(self._BaseLat, self._BaseLon, self._CurrentTime)
# Downloads GFS weather data
self.WeatherData[-1].loadForecast()
logging.info('GFS Data downloaded!')
print "Forecast downloaded!"
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
# Checks if UAV is low enough for calculations to stop
if self._CurrentAlt < 1:
break
# Prepares output
self._FinalLatLon = m2deg(self._YDirDump[-1], self._XDirDump[-1], self.Lat)
self._EndHead = numpy.rad2deg(numpy.arctan2(self._YDirDump[-1], self._XDirDump[-1]))
if self._EndHead < 0 and FinalHead > 90:
self._EndHead += 360
logging.info('Final Latitude: %s\nFinal Longitude: %s\nDescent Time: %s', self._FinalLatLon[0] + self.Lat, self._FinalLatLon[1] + self.Lon, (self._CurrentTime - self.StartTime))
print 'Final Latitude: {}\nFinal Longitude: {}\nDescent Time: {}'.format(self._FinalLatLon[0] + self.Lat, self._FinalLatLon[1] + self.Lon, (self._CurrentTime - self.StartTime))
print 'Final Heading: {}\nDesired Heading: {}\nFinal Altitude: {}'.format(self._EndHead, FinalHead, self._CurrentAlt)
# Sets variables for plotting
# Outputs
return [self._XDirDump[-1], self._YDirDump[-1], self._CurrentDist * numpy.cos(self.FinalHead), self._CurrentDist * numpy.sin(self.FinalHead), FinalHead, self._EndHead]
def Fly_2(self, FinalHead):
"""
Simulates gliding UAV flight from a provided altitude; weather data is provided by GFS.
Uses heading-based algorithm to calculate UAV heading.
Inputs: Desired Heading
Prints and Returns: End Latitude, End Longitude, Ideal End Latitude, Ideal End Longitude, End Heading, Desired Heading
"""
# Fly for TimeStep1 seconds and recalculates dependent variables
print 'Running Fly_2'
# Processes desired heading to avoid degree/radian related issues
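# Wraps the desired heading into the 0-360 degree range; headings above 315 deg are re-expressed
# as a small negative angle in radians (via numpy.unwrap) so the heading-error comparisons below
# do not jump across the 0/360 boundary.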
if FinalHead < 0:
FinalHead = 360 - (abs(FinalHead) % 360)
elif FinalHead > 360:
FinalHead = FinalHead % 360
else:
FinalHead = FinalHead
if FinalHead > 315:
# Makes self.FinalHead -ve in radians
self.FinalHead = numpy.unwrap([0, numpy.deg2rad(FinalHead)])[1]
else:
self.FinalHead = numpy.deg2rad(FinalHead)
logging.info('User Input:\nStart Latitude: %s\nStart Longitude: %s\nStart Altitude: %s\nDesired Heading: %s\nStart Time: %s\nLift to Drag Ratio: %s\n \
Mass: %s\nWing Planform Area: %s\nCoefficient of Lift: %s', self.Lat, self.Lon, self.Alt, self.FinalHead, self.StartTime, self.LD,
self.Mass, self.PlanformArea, self.CL)
while self._CurrentAlt > 100:
# Fly for TimeStep1 seconds and recalculates dependent variables
# Calculates heading of UAV
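# Heading-based correction: the net track heading flown so far is compared with the desired
# heading. For large errors (> 0.8 rad) the UAV simply flies the desired heading; for smaller
# errors the command mirrors the error to the opposite side of the desired heading with
# progressively larger gains (1, 1.6, 2.7, 8, 16), pulling the ground track back onto the bearing.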
self._CurrentHead = numpy.arctan2(self._YDirDump[-1], self._XDirDump[-1])
# Avoids issues at 0, 180, 360 degrees
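# The current heading from arctan2 lies in (-pi, pi]; it is only shifted into [0, 2*pi) when the
# desired heading is at least pi/8 rad, which avoids a spurious 2*pi jump in the error near the
# 0/360 degree boundary.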
if self.FinalHead < 0:
self._CurrentHead = self._CurrentHead
elif self.FinalHead >= 0 and self.FinalHead < numpy.pi / 8:
self._CurrentHead = self._CurrentHead
else:
if self._CurrentHead < 0:
self._CurrentHead += 2 * numpy.pi
else:
self._CurrentHead = self._CurrentHead
if abs(self._CurrentHead - self.FinalHead) > 0.8:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead
elif abs(self._CurrentHead - self.FinalHead) > 0.5:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + (self.FinalHead - self._CurrentHead)
elif abs(self._CurrentHead - self.FinalHead) > 0.3:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 1.6 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 1.6 * (self.FinalHead - self._CurrentHead)
elif abs(self._CurrentHead - self.FinalHead) > 0.1:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 2.7 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 2.7 * (self.FinalHead - self._CurrentHead)
elif abs(self._CurrentHead - self.FinalHead) > 0.05:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 8 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 8 * (self.FinalHead - self._CurrentHead)
else:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 16 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 16 * (self.FinalHead - self._CurrentHead)
self._HeadingDump.append(self._Heading)
# Provides list of time values for ODE solver
time = numpy.linspace(0, self.TimeStep1)
# Variables required for ODE solver
SinA = numpy.sin(self._GlideAngle)
CosA = numpy.cos(self._GlideAngle)
B = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass)
C = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass * self.LD)
# Sets up ODEs to be solved
def z_derivatives(z, t):
return numpy.array([-z[1] * SinA, - B * (z[1] ** 2) + 9.81 * SinA])
def xy_derivatives(xy, t):
return numpy.array([xy[1] * CosA, - C * (xy[1] ** 2) + 9.81 * CosA])
# Solves for change in altitude
z_initial_conditions = numpy.array([self._CurrentAlt, self._XZCurrentSpeed])
z = odeint(z_derivatives, z_initial_conditions, time)
self._CurrentAlt, self._XZCurrentSpeed = z[-1]
# Solves for distance travelled
xy_initial_conditions = numpy.array([self._CurrentDist, self._XZTempSpeed])
xy = odeint(xy_derivatives, xy_initial_conditions, time)
self._CurrentDist, self._XZTempSpeed = xy[-1]
# Appends changes in X and Y directions to X and Y data dumps
if self._Heading == 0:
self._XDirDump.append(self._XDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == numpy.pi:
self._XDirDump.append(self._XDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == 3 * numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
else:
self._XDirDump.append(self._XDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.cos(self._Heading)) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.sin(self._Heading)) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
# Appends the wind conditions to wind data dumps
self._XWindDump.append(self._xyWindSpeed * numpy.cos(self._WindHead))
self._YWindDump.append(self._xyWindSpeed * numpy.sin(self._WindHead))
# Updates parameters used to obtain the next set of forecasts
self._CurrentLatLon = m2deg(self._YDirDump[-1], self._XDirDump[-1], self.Lat)
self._CurrentLat = self.Lat + self._CurrentLatLon[0]
self._CurrentLon = self.Lon + self._CurrentLatLon[1]
self._CurrentTime += timedelta(seconds=self.TimeStep1)
# Appends useful data to data dumps
self._TimeDump.append(self._TimeDump[-1] + self.TimeStep1)
self._AltDump.append(self._CurrentAlt)
self._DistDump.append(self._CurrentDist)
logging.info('Current Latitude: %s\nCurrent Longitude: %s\nCurrent Altitude: %s\nCurrent Time: %s', self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
# Check if region travelled exceeds 3 degrees Lat and 3 degrees Lon from the launch site
if abs(self._CurrentLat - self._BaseLat) < 1.5 and abs(self._CurrentLon - self._BaseLon) < 1.5:
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
else:
print 'UAV has travelled out of forecast region.'
print 'New forecast has to be obtained, please wait while forecast is downloaded and processed.'
self._BaseLat = self._CurrentLat
self._BaseLon = self._CurrentLon
# Initialises Weather Environment object
self.WeatherData.append(weather.forecastEnvironment())
# Obtains data from GFS for current flight conditions
self.WeatherData[-1].launchSiteLat = self._BaseLat
self.WeatherData[-1].launchSiteLon = self._BaseLon
self.WeatherData[-1].launchSiteElev = self._CurrentAlt
self.WeatherData[-1].dateAndTime = self._CurrentTime
self.WeatherData[-1].UTC_offset = getUTCOffset(self._BaseLat, self._BaseLon, self._CurrentTime)
# Downloads GFS weather data
self.WeatherData[-1].loadForecast()
logging.info('GFS Data downloaded!')
print "Forecast downloaded!"
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
while self._CurrentAlt > 0:
# Fly for TimeStep2 seconds and recalculates dependent variables
# Calculates heading of UAV
self._CurrentHead = numpy.arctan2(self._YDirDump[-1], self._XDirDump[-1])
# Avoids issues at 0, 180, 360 degrees
if self.FinalHead < 0:
self._CurrentHead = self._CurrentHead
elif self.FinalHead >= 0 and self.FinalHead < numpy.pi / 8:
self._CurrentHead = self._CurrentHead
else:
if self._CurrentHead < 0:
self._CurrentHead += 2 * numpy.pi
else:
self._CurrentHead = self._CurrentHead
if abs(self._CurrentHead - self.FinalHead) > 0.8:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead
elif abs(self._CurrentHead - self.FinalHead) > 0.5:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + (self.FinalHead - self._CurrentHead)
elif abs(self._CurrentHead - self.FinalHead) > 0.3:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 1.6 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 1.6 * (self.FinalHead - self._CurrentHead)
elif abs(self._CurrentHead - self.FinalHead) > 0.1:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 2.7 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 2.7 * (self.FinalHead - self._CurrentHead)
elif abs(self._CurrentHead - self.FinalHead) > 0.05:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 8 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 8 * (self.FinalHead - self._CurrentHead)
else:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 16 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 16 * (self.FinalHead - self._CurrentHead)
self._HeadingDump.append(self._Heading)
# Provides list of time values for ODE solver
time = numpy.linspace(0, self.TimeStep2, num=100)
# Variables required for ODE solver
SinA = numpy.sin(self._GlideAngle)
CosA = numpy.cos(self._GlideAngle)
B = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass)
C = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass * self.LD)
# Sets up ODEs to be solved
def z_derivatives(z, t):
return numpy.array([-z[1] * SinA, - B * (z[1] ** 2) + 9.81 * SinA])
def xy_derivatives(xy, t):
return numpy.array([xy[1] * CosA, - C * (xy[1] ** 2) + 9.81 * CosA])
# Solves for change in altitude
z_initial_conditions = numpy.array([self._CurrentAlt, self._XZCurrentSpeed])
z = odeint(z_derivatives, z_initial_conditions, time)
self._CurrentAlt, self._XZCurrentSpeed = z[-1]
# Solves for distance travelled
xy_initial_conditions = numpy.array([self._CurrentDist, self._XZTempSpeed])
xy = odeint(xy_derivatives, xy_initial_conditions, time)
self._CurrentDist, self._XZTempSpeed = xy[-1]
# Appends changes in X and Y directions to X and Y data dumps
if self._Heading == 0:
self._XDirDump.append(self._XDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
elif self._Heading == numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
elif self._Heading == numpy.pi:
self._XDirDump.append(self._XDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
elif self._Heading == 3 * numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
else:
self._XDirDump.append(self._XDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.cos(self._Heading)) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.sin(self._Heading)) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
# Appends the wind conditions to wind data dumps
self._XWindDump.append(self._xyWindSpeed * numpy.cos(self._WindHead))
self._YWindDump.append(self._xyWindSpeed * numpy.sin(self._WindHead))
# Updates parameters used to obtain the next set of forecasts
self._CurrentLatLon = m2deg(self._YDirDump[-1], self._XDirDump[-1], self.Lat)
self._CurrentLat = self.Lat + self._CurrentLatLon[0]
self._CurrentLon = self.Lon + self._CurrentLatLon[1]
self._CurrentTime += timedelta(seconds=self.TimeStep2)
# Appends useful data to data dumps
self._TimeDump.append(self._TimeDump[-1] + self.TimeStep2)
self._AltDump.append(self._CurrentAlt)
self._DistDump.append(self._CurrentDist)
logging.info('Current Latitude: %s\nCurrent Longitude: %s\nCurrent Altitude: %s\nCurrent Time: %s', self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
# Check if region travelled exceeds 3 degrees Lat and 3 degrees Lon from the launch site
if abs(self._CurrentLat - self._BaseLat) < 1.5 and abs(self._CurrentLon - self._BaseLon) < 1.5:
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
# Checks if UAV is low enough for calculations to stop
if self._CurrentAlt < 1:
break
else:
print 'UAV has travelled out of forecast region.'
print 'New forecast has to be obtained, please wait while forecast is downloaded and processed.'
self._BaseLat = self._CurrentLat
self._BaseLon = self._CurrentLon
# Initialises Weather Environment object
self.WeatherData.append(weather.forecastEnvironment())
# Obtains data from GFS for current flight conditions
self.WeatherData[-1].launchSiteLat = self._BaseLat
self.WeatherData[-1].launchSiteLon = self._BaseLon
self.WeatherData[-1].launchSiteElev = self._CurrentAlt
self.WeatherData[-1].dateAndTime = self._CurrentTime
self.WeatherData[-1].UTC_offset = getUTCOffset(self._BaseLat, self._BaseLon, self._CurrentTime)
# Downloads GFS weather data
self.WeatherData[-1].loadForecast()
logging.info('GFS Data downloaded!')
print "Forecast downloaded!"
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
# Checks if UAV is low enough for calculations to stop
if self._CurrentAlt < 1:
break
# Prepares output
self._FinalLatLon = m2deg(self._YDirDump[-1], self._XDirDump[-1], self.Lat)
self._EndHead = numpy.rad2deg(numpy.arctan2(self._YDirDump[-1], self._XDirDump[-1]))
if self._EndHead < 0 and FinalHead > 90:
self._EndHead += 360
logging.info('Final Latitude: %s\nFinal Longitude: %s\nDescent Time: %s', self._FinalLatLon[0] + self.Lat, self._FinalLatLon[1] + self.Lon, (self._CurrentTime - self.StartTime))
print 'Final Latitude: {}\nFinal Longitude: {}\nDescent Time: {}'.format(self._FinalLatLon[0] + self.Lat, self._FinalLatLon[1] + self.Lon, (self._CurrentTime - self.StartTime))
print 'Final Heading: {}\nDesired Heading: {}\nFinal Altitude: {}'.format(self._EndHead, FinalHead, self._CurrentAlt)
# Sets variables for plotting
WriteToFile_Simulator(self._StartTime, self._XDirDump, self._YDirDump, self._DensityDump, self._AltDump, self._TimeDump, self.TimeStep1)
# Outputs
return [self._XDirDump[-1], self._YDirDump[-1], self._CurrentDist * numpy.cos(self.FinalHead), self._CurrentDist * numpy.sin(self.FinalHead), FinalHead, self._EndHead]
def Fly_3(self, FinalHead):
"""
Simulates gliding UAV flight from a provided altitude; weather data is provided by GFS.
Uses heading-based algorithm to calculate UAV heading.
Inputs: Desired Heading
Prints and Returns: End Latitude, End Longitude, Ideal End Latitude, Ideal End Longitude, End Heading, Desired Heading
"""
# Fly for TimeStep1 seconds and recalculates dependent variables
print 'Running Fly_3'
# Processes desired heading to avoid degree/radian related issues
if FinalHead < 0:
FinalHead = 360 - (abs(FinalHead) % 360)
elif FinalHead > 360:
FinalHead = FinalHead % 360
else:
FinalHead = FinalHead
if FinalHead > 315:
# Makes self.FinalHead -ve in radians
self.FinalHead = numpy.unwrap([0, numpy.deg2rad(FinalHead)])[1]
else:
self.FinalHead = numpy.deg2rad(FinalHead)
logging.info('User Input:\nStart Latitude: %s\nStart Longitude: %s\nStart Altitude: %s\nDesired Heading: %s\nStart Time: %s\nLift to Drag Ratio: %s\n \
Mass: %s\nWing Planform Area: %s\nCoefficient of Lift: %s', self.Lat, self.Lon, self.Alt, self.FinalHead, self.StartTime, self.LD,
self.Mass, self.PlanformArea, self.CL)
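# Fly_3 descends in three phases: (1) banded heading correction down to 60% of the start
# altitude, (2) hold the desired heading between 60% and 25% of the start altitude, then
# (3) resume the banded correction on TimeStep2 until touchdown.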
while self._CurrentAlt > 0.6 * self.Alt:
# Fly for TimeStep1 seconds and recalculates dependent variables
# Calculates heading of UAV
self._CurrentHead = numpy.arctan2(self._YDirDump[-1], self._XDirDump[-1])
# Avoids issues at 0, 180, 360 degrees
if self.FinalHead < 0:
self._CurrentHead = self._CurrentHead
elif self.FinalHead >= 0 and self.FinalHead < numpy.pi / 8:
self._CurrentHead = self._CurrentHead
else:
if self._CurrentHead < 0:
self._CurrentHead += 2 * numpy.pi
else:
self._CurrentHead = self._CurrentHead
if abs(self._CurrentHead - self.FinalHead) > 0.8:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead
elif abs(self._CurrentHead - self.FinalHead) > 0.5:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + (self.FinalHead - self._CurrentHead)
elif abs(self._CurrentHead - self.FinalHead) > 0.3:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 1.6 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 1.6 * (self.FinalHead - self._CurrentHead)
elif abs(self._CurrentHead - self.FinalHead) > 0.1:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 2.7 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 2.7 * (self.FinalHead - self._CurrentHead)
elif abs(self._CurrentHead - self.FinalHead) > 0.05:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 8 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 8 * (self.FinalHead - self._CurrentHead)
else:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 16 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 16 * (self.FinalHead - self._CurrentHead)
self._HeadingDump.append(self._Heading)
# Provides list of time values for ODE solver
time = numpy.linspace(0, self.TimeStep1)
# Variables required for ODE solver
SinA = numpy.sin(self._GlideAngle)
CosA = numpy.cos(self._GlideAngle)
B = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass)
C = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass * self.LD)
# Sets up ODEs to be solved
def z_derivatives(z, t):
return numpy.array([-z[1] * SinA, - B * (z[1] ** 2) + 9.81 * SinA])
def xy_derivatives(xy, t):
return numpy.array([xy[1] * CosA, - C * (xy[1] ** 2) + 9.81 * CosA])
# Solves for change in altitude
z_initial_conditions = numpy.array([self._CurrentAlt, self._XZCurrentSpeed])
z = odeint(z_derivatives, z_initial_conditions, time)
self._CurrentAlt, self._XZCurrentSpeed = z[-1]
# Solves for distance travelled
xy_initial_conditions = numpy.array([self._CurrentDist, self._XZTempSpeed])
xy = odeint(xy_derivatives, xy_initial_conditions, time)
self._CurrentDist, self._XZTempSpeed = xy[-1]
# Appends changes in X and Y directions to X and Y data dumps
if self._Heading == 0:
self._XDirDump.append(self._XDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == numpy.pi:
self._XDirDump.append(self._XDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == 3 * numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
else:
self._XDirDump.append(self._XDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.cos(self._Heading)) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.sin(self._Heading)) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
# Appends the wind conditions to wind data dumps
self._XWindDump.append(self._xyWindSpeed * numpy.cos(self._WindHead))
self._YWindDump.append(self._xyWindSpeed * numpy.sin(self._WindHead))
# Updates parameters used to obtain the next set of forecasts
self._CurrentLatLon = m2deg(self._YDirDump[-1], self._XDirDump[-1], self.Lat)
self._CurrentLat = self.Lat + self._CurrentLatLon[0]
self._CurrentLon = self.Lon + self._CurrentLatLon[1]
self._CurrentTime += timedelta(seconds=self.TimeStep1)
# Appends useful data to data dumps
self._TimeDump.append(self._TimeDump[-1] + self.TimeStep1)
self._AltDump.append(self._CurrentAlt)
self._DistDump.append(self._CurrentDist)
logging.info('Current Latitude: %s\nCurrent Longitude: %s\nCurrent Altitude: %s\nCurrent Time: %s', self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
# Check if region travelled exceeds 3 degrees Lat and 3 degrees Lon from the launch site
if abs(self._CurrentLat - self._BaseLat) < 1.5 and abs(self._CurrentLon - self._BaseLon) < 1.5:
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
else:
print 'UAV has travelled out of forecast region.'
print 'New forecast has to be obtained, please wait while forecast is downloaded and processed.'
self._BaseLat = self._CurrentLat
self._BaseLon = self._CurrentLon
# Initialises Weather Environment object
self.WeatherData.append(weather.forecastEnvironment())
# Obtains data from GFS for current flight conditions
self.WeatherData[-1].launchSiteLat = self._BaseLat
self.WeatherData[-1].launchSiteLon = self._BaseLon
self.WeatherData[-1].launchSiteElev = self._CurrentAlt
self.WeatherData[-1].dateAndTime = self._CurrentTime
self.WeatherData[-1].UTC_offset = getUTCOffset(self._BaseLat, self._BaseLon, self._CurrentTime)
# Downloads GFS weather data
self.WeatherData[-1].loadForecast()
logging.info('GFS Data downloaded!')
print "Forecast downloaded!"
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
while self._CurrentAlt < 0.6 * self.Alt and self._CurrentAlt > 0.25 * self.Alt:
# Fly for TimeStep1 seconds and recalculates dependent variables (this phase holds the desired heading)
self._Heading = self.FinalHead
self._HeadingDump.append(self._Heading)
# Provides list of time values for ODE solver
time = numpy.linspace(0, self.TimeStep1)
# Variables required for ODE solver
SinA = numpy.sin(self._GlideAngle)
CosA = numpy.cos(self._GlideAngle)
B = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass)
C = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass * self.LD)
# Sets up ODEs to be solved
def z_derivatives(z, t):
return numpy.array([-z[1] * SinA, - B * (z[1] ** 2) + 9.81 * SinA])
def xy_derivatives(xy, t):
return numpy.array([xy[1] * CosA, - C * (xy[1] ** 2) + 9.81 * CosA])
# Solves for change in altitude
z_initial_conditions = numpy.array([self._CurrentAlt, self._XZCurrentSpeed])
z = odeint(z_derivatives, z_initial_conditions, time)
self._CurrentAlt, self._XZCurrentSpeed = z[-1]
# Solves for distance travelled
xy_initial_conditions = numpy.array([self._CurrentDist, self._XZTempSpeed])
xy = odeint(xy_derivatives, xy_initial_conditions, time)
self._CurrentDist, self._XZTempSpeed = xy[-1]
# Appends changes in X and Y directions to X and Y data dumps
if self._Heading == 0:
self._XDirDump.append(self._XDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == numpy.pi:
self._XDirDump.append(self._XDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == 3 * numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
else:
self._XDirDump.append(self._XDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.cos(self._Heading)) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.sin(self._Heading)) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
# Appends the wind conditions to wind data dumps
self._XWindDump.append(self._xyWindSpeed * numpy.cos(self._WindHead))
self._YWindDump.append(self._xyWindSpeed * numpy.sin(self._WindHead))
# Updates parameters used to obtain the next set of forecasts
self._CurrentLatLon = m2deg(self._YDirDump[-1], self._XDirDump[-1], self.Lat)
self._CurrentLat = self.Lat + self._CurrentLatLon[0]
self._CurrentLon = self.Lon + self._CurrentLatLon[1]
self._CurrentTime += timedelta(seconds=self.TimeStep1)
# Appends useful data to data dumps
self._TimeDump.append(self._TimeDump[-1] + self.TimeStep1)
self._AltDump.append(self._CurrentAlt)
self._DistDump.append(self._CurrentDist)
logging.info('Current Latitude: %s\nCurrent Longitude: %s\nCurrent Altitude: %s\nCurrent Time: %s', self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
# Check if region travelled exceeds 3 degrees Lat and 3 degrees Lon from the launch site
if abs(self._CurrentLat - self._BaseLat) < 1.5 and abs(self._CurrentLon - self._BaseLon) < 1.5:
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
else:
print 'UAV has travelled out of forecast region.'
print 'New forecast has to be obtained, please wait while forecast is downloaded and processed.'
self._BaseLat = self._CurrentLat
self._BaseLon = self._CurrentLon
# Initialises Weather Environment object
self.WeatherData.append(weather.forecastEnvironment())
# Obtains data from GFS for current flight conditions
self.WeatherData[-1].launchSiteLat = self._BaseLat
self.WeatherData[-1].launchSiteLon = self._BaseLon
self.WeatherData[-1].launchSiteElev = self._CurrentAlt
self.WeatherData[-1].dateAndTime = self._CurrentTime
self.WeatherData[-1].UTC_offset = getUTCOffset(self._BaseLat, self._BaseLon, self._CurrentTime)
# Downloads GFS weather data
self.WeatherData[-1].loadForecast()
logging.info('GFS Data downloaded!')
print "Forecast downloaded!"
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
while self._CurrentAlt < 0.25 * self.Alt:
# Fly for TimeStep2 seconds and recalculates dependent variables
# Calculates heading of UAV
self._CurrentHead = numpy.arctan2(self._YDirDump[-1], self._XDirDump[-1])
# Avoids issues at 0, 180, 360 degrees
if self.FinalHead < 0:
self._CurrentHead = self._CurrentHead
elif self.FinalHead >= 0 and self.FinalHead < numpy.pi / 8:
self._CurrentHead = self._CurrentHead
else:
if self._CurrentHead < 0:
self._CurrentHead += 2 * numpy.pi
else:
self._CurrentHead = self._CurrentHead
if abs(self._CurrentHead - self.FinalHead) > 0.8:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead
elif abs(self._CurrentHead - self.FinalHead) > 0.5:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + (self.FinalHead - self._CurrentHead)
elif abs(self._CurrentHead - self.FinalHead) > 0.3:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 1.6 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 1.6 * (self.FinalHead - self._CurrentHead)
elif abs(self._CurrentHead - self.FinalHead) > 0.1:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 2.7 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 2.7 * (self.FinalHead - self._CurrentHead)
elif abs(self._CurrentHead - self.FinalHead) > 0.05:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 8 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 8 * (self.FinalHead - self._CurrentHead)
else:
if self._CurrentHead > self.FinalHead:
self._Heading = self.FinalHead - 16 * (self._CurrentHead - self.FinalHead)
else: # self.FinalHead > self._CurrentHead
self._Heading = self.FinalHead + 16 * (self.FinalHead - self._CurrentHead)
self._HeadingDump.append(self._Heading)
# Provides list of time values for ODE solver
time = numpy.linspace(0, self.TimeStep2, num=100)
# Variables required for ODE solver
SinA = numpy.sin(self._GlideAngle)
CosA = numpy.cos(self._GlideAngle)
B = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass)
C = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass * self.LD)
# Sets up ODEs to be solved
def z_derivatives(z, t):
return numpy.array([-z[1] * SinA, - B * (z[1] ** 2) + 9.81 * SinA])
def xy_derivatives(xy, t):
return numpy.array([xy[1] * CosA, - C * (xy[1] ** 2) + 9.81 * CosA])
# Solves for change in altitude
z_initial_conditions = numpy.array([self._CurrentAlt, self._XZCurrentSpeed])
z = odeint(z_derivatives, z_initial_conditions, time)
self._CurrentAlt, self._XZCurrentSpeed = z[-1]
# Solves for distance travelled
xy_initial_conditions = numpy.array([self._CurrentDist, self._XZTempSpeed])
xy = odeint(xy_derivatives, xy_initial_conditions, time)
self._CurrentDist, self._XZTempSpeed = xy[-1]
# Appends changes in X and Y directions to X and Y data dumps
if self._Heading == 0:
self._XDirDump.append(self._XDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
elif self._Heading == numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
elif self._Heading == numpy.pi:
self._XDirDump.append(self._XDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
elif self._Heading == 3 * numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
else:
self._XDirDump.append(self._XDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.cos(self._Heading)) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.sin(self._Heading)) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
# Appends the wind conditions to wind data dumps
self._XWindDump.append(self._xyWindSpeed * numpy.cos(self._WindHead))
self._YWindDump.append(self._xyWindSpeed * numpy.sin(self._WindHead))
# Updates parameters used to obtain the next set of forecasts
self._CurrentLatLon = m2deg(self._YDirDump[-1], self._XDirDump[-1], self.Lat)
self._CurrentLat = self.Lat + self._CurrentLatLon[0]
self._CurrentLon = self.Lon + self._CurrentLatLon[1]
self._CurrentTime += timedelta(seconds=self.TimeStep2)
# Appends useful data to data dumps
self._TimeDump.append(self._TimeDump[-1] + self.TimeStep2)
self._AltDump.append(self._CurrentAlt)
self._DistDump.append(self._CurrentDist)
logging.info('Current Latitude: %s\nCurrent Longitude: %s\nCurrent Altitude: %s\nCurrent Time: %s', self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
# Check if region travelled exceeds 3 degrees Lat and 3 degrees Lon from the launch site
if abs(self._CurrentLat - self._BaseLat) < 1.5 and abs(self._CurrentLon - self._BaseLon) < 1.5:
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
# Checks if UAV is low enough for calculations to stop
if self._CurrentAlt < 1:
break
else:
print 'UAV has travelled out of forecast region.'
print 'New forecast has to be obtained, please wait while forecast is downloaded and processed.'
self._BaseLat = self._CurrentLat
self._BaseLon = self._CurrentLon
# Initialises Weather Environment object
self.WeatherData.append(weather.forecastEnvironment())
# Obtains data from GFS for current flight conditions
self.WeatherData[-1].launchSiteLat = self._BaseLat
self.WeatherData[-1].launchSiteLon = self._BaseLon
self.WeatherData[-1].launchSiteElev = self._CurrentAlt
self.WeatherData[-1].dateAndTime = self._CurrentTime
self.WeatherData[-1].UTC_offset = getUTCOffset(self._BaseLat, self._BaseLon, self._CurrentTime)
# Downloads GFS weather data
self.WeatherData[-1].loadForecast()
logging.info('GFS Data downloaded!')
print "Forecast downloaded!"
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
# Checks if UAV is low enough for calculations to stop
if self._CurrentAlt < 1:
break
# Prepares output
self._FinalLatLon = m2deg(self._YDirDump[-1], self._XDirDump[-1], self.Lat)
self._EndHead = numpy.rad2deg(numpy.arctan2(self._YDirDump[-1], self._XDirDump[-1]))
if self._EndHead < 0 and FinalHead > 90:
self._EndHead += 360
logging.info('Final Latitude: %s\nFinal Longitude: %s\nDescent Time: %s', self._FinalLatLon[0] + self.Lat, self._FinalLatLon[1] + self.Lon, (self._CurrentTime - self.StartTime))
print 'Final Latitude: {}\nFinal Longitude: {}\nDescent Time: {}'.format(self._FinalLatLon[0] + self.Lat, self._FinalLatLon[1] + self.Lon, (self._CurrentTime - self.StartTime))
print 'Final Heading: {}\nDesired Heading: {}\nFinal Altitude: {}'.format(self._EndHead, FinalHead, self._CurrentAlt)
# Sets variables for plotting
# Outputs
return [self._XDirDump[-1], self._YDirDump[-1], self._CurrentDist * numpy.cos(self.FinalHead), self._CurrentDist * numpy.sin(self.FinalHead), FinalHead, self._EndHead]
def Fly_4(self, Final_X, Final_Y, Error, FinalHead):
"""
Simulates gliding UAV flight from a provided altitude; weather data is provided by GFS.
Uses a target-position-based algorithm to calculate UAV heading.
Inputs: Desired X Distance, Desired Y Distance, Error in X and Y Direction, Desired Heading
"""
# Fly for TimeStep1 seconds and recalculates dependent variables
self.Trigger = False
self.Error = Error
self._BaseXDiff, self._BaseYDiff = Final_X, Final_Y
logging.info('User Input:\nStart Latitude: %s\nStart Longitude: %s\nEnd X: %s\nEnd Y: %s\nStart Altitude: %s\nStart Time: %s\nLift to Drag Ratio: %s\n \
Mass: %s\nWing Planform Area: %s\nCoefficient of Lift: %s', self.Lat, self.Lon, self._BaseXDiff, self._BaseYDiff, self.Alt, self.StartTime, self.LD,
self.Mass, self.PlanformArea, self.CL)
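# Fly_4 targets an absolute (Final_X, Final_Y) displacement rather than a bearing: once the UAV
# is within Error of the target in both axes, the end state is recorded on the instance
# (x_coords, y_coords, set_heading, end_heading) and the method returns early.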
while self._CurrentAlt > 100:
# Fly for TimeStep1 seconds and recalculates dependent variables
# Calculates heading of UAV
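# Unlike Fly_1, the target point (_BaseXDiff, _BaseYDiff) is fixed, so the commanded heading
# always points from the current position straight at (Final_X, Final_Y).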
self._XDiff, self._YDiff = self._XDirDump[-1], self._YDirDump[-1]
if (self._BaseXDiff - self._XDiff) == 0 and (self._BaseYDiff - self._YDiff) > 0:
self._Heading = numpy.pi / 2
elif (self._BaseXDiff - self._XDiff) < 0 and (self._BaseYDiff - self._YDiff) == 0:
self._Heading = numpy.pi
elif (self._BaseXDiff - self._XDiff) == 0 and (self._BaseYDiff - self._YDiff) < 0:
self._Heading = 3 * numpy.pi / 2
else:
self._Heading = numpy.arctan2(self._BaseYDiff - self._YDiff, self._BaseXDiff - self._XDiff)
self._HeadingDump.append(self._Heading)
# Provides list of time values for ODE solver
time = numpy.linspace(0, self.TimeStep1)
# Variables required for ODE solver
SinA = numpy.sin(self._GlideAngle)
CosA = numpy.cos(self._GlideAngle)
B = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass)
C = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass * self.LD)
# Sets up ODEs to be solved
def z_derivatives(z, t):
return numpy.array([-z[1] * SinA, - B * (z[1] ** 2) + 9.81 * SinA])
def xy_derivatives(xy, t):
return numpy.array([xy[1] * CosA, - C * (xy[1] ** 2) + 9.81 * CosA])
# Solves for change in altitude
z_initial_conditions = numpy.array([self._CurrentAlt, self._XZCurrentSpeed])
z = odeint(z_derivatives, z_initial_conditions, time)
self._CurrentAlt, self._XZCurrentSpeed = z[-1]
# Solves for distance travelled
xy_initial_conditions = numpy.array([self._CurrentDist, self._XZTempSpeed])
xy = odeint(xy_derivatives, xy_initial_conditions, time)
self._CurrentDist, self._XZTempSpeed = xy[-1]
# Appends changes in X and Y directions to X and Y data dumps
if self._Heading == 0:
self._XDirDump.append(self._XDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == numpy.pi:
self._XDirDump.append(self._XDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
elif self._Heading == 3 * numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
else:
self._XDirDump.append(self._XDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.cos(self._Heading)) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep1)
self._YDirDump.append(self._YDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.sin(self._Heading)) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep1)
# Appends the wind conditions to wind data dumps
self._XWindDump.append(self._xyWindSpeed * numpy.cos(self._WindHead))
self._YWindDump.append(self._xyWindSpeed * numpy.sin(self._WindHead))
# Updates parameters used to obtain the next set of forecasts
self._CurrentLatLon = m2deg(self._YDirDump[-1], self._XDirDump[-1], self.Lat)
self._CurrentLat = self.Lat + self._CurrentLatLon[0]
self._CurrentLon = self.Lon + self._CurrentLatLon[1]
self._CurrentTime += timedelta(seconds=self.TimeStep1)
# Appends useful data to data dumps
self._TimeDump.append(self._TimeDump[-1] + self.TimeStep1)
self._AltDump.append(self._CurrentAlt)
self._DistDump.append(self._CurrentDist)
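# Early exit: stop as soon as the position is within Error of the target in both axes and
# record the achieved end heading alongside the requested one.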
if abs(self._BaseXDiff - self._XDirDump[-1]) < self.Error and abs(self._BaseYDiff - self._YDirDump[-1]) < self.Error:
self._EndHead = numpy.rad2deg(numpy.arctan2(self._YDirDump[-1], self._XDirDump[-1]))
self.x_coords.append(self._XDirDump[-1])
self.y_coords.append(self._YDirDump[-1])
self.x_coords_end.append(Final_X)
self.y_coords_end.append(Final_Y)
self.set_heading.append(FinalHead)
self.end_heading.append(self._EndHead)
return
logging.info('Current Latitude: %s\nCurrent Longitude: %s\nCurrent Altitude: %s\nCurrent Time: %s', self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
# Check if region travelled exceeds 3 degrees Lat and 3 degrees Lon from the launch site
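# Unlike Fly_1-Fly_3, leaving the forecast region here does not trigger a new GFS download;
# the last wind speed, wind direction and density are simply held constant.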
if self.Trigger is False:
if abs(self._CurrentLat - self._BaseLat) < 1.5 and abs(self._CurrentLon - self._BaseLon) < 1.5:
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
else:
print 'UAV has travelled out of forecast region.'
print 'Assume UAV stays at border of forecast region, hold xyWindSpeed, WindHead, CurrentDensity'
# Hold the last in-region values of _xyWindSpeed, _WindHead and _CurrentDensity
self._DensityDump.append(self._CurrentDensity)
else:
pass
while self._CurrentAlt > 0:
# Fly for TimeStep2 seconds and recalculates dependent variables
self._XDiff, self._YDiff = self._XDirDump[-1], self._YDirDump[-1]
if (self._BaseXDiff - self._XDiff) == 0 and (self._BaseYDiff - self._YDiff) > 0:
self._Heading = numpy.pi / 2
elif (self._BaseXDiff - self._XDiff) < 0 and (self._BaseYDiff - self._YDiff) == 0:
self._Heading = numpy.pi
elif (self._BaseXDiff - self._XDiff) == 0 and (self._BaseYDiff - self._YDiff) < 0:
self._Heading = 3 * numpy.pi / 2
else:
self._Heading = numpy.arctan2(self._BaseYDiff - self._YDiff, self._BaseXDiff - self._XDiff)
self._HeadingDump.append(self._Heading)
# Provides list of time values for ODE solver
time = numpy.linspace(0, self.TimeStep2, num=100)
# Variables required for ODE solver
SinA = numpy.sin(self._GlideAngle)
CosA = numpy.cos(self._GlideAngle)
B = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass)
C = 0.5 * self._CurrentDensity * self.PlanformArea * self.CL / (self.Mass * self.LD)
# Sets up ODEs to be solved
def z_derivatives(z, t):
return numpy.array([-z[1] * SinA, - B * (z[1] ** 2) + 9.81 * SinA])
def xy_derivatives(xy, t):
return numpy.array([xy[1] * CosA, - C * (xy[1] ** 2) + 9.81 * CosA])
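# The two systems above describe a steady glide at glide angle A = _GlideAngle:
# z = [altitude, airspeed] obeys d(alt)/dt = -v*sin(A), dv/dt = -B*v**2 + 9.81*sin(A)
# xy = [ground distance, horizontal speed] obeys d(dist)/dt = v*cos(A), dv/dt = -C*v**2 + 9.81*cos(A)
# where B and C (computed just above) collect air density, planform area, lift
# coefficient, mass and, for C, the lift-to-drag ratio.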
# Solves for change in altitude
z_initial_conditions = numpy.array([self._CurrentAlt, self._XZCurrentSpeed])
z = odeint(z_derivatives, z_initial_conditions, time)
self._CurrentAlt, self._XZCurrentSpeed = z[-1]
# Solves for distance travelled
xy_initial_conditions = numpy.array([self._CurrentDist, self._XZTempSpeed])
xy = odeint(xy_derivatives, xy_initial_conditions, time)
self._CurrentDist, self._XZTempSpeed = xy[-1]
# Appends changes in X and Y directions to X and Y data dumps
if self._Heading == 0:
self._XDirDump.append(self._XDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
elif self._Heading == numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
elif self._Heading == numpy.pi:
self._XDirDump.append(self._XDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
elif self._Heading == 3 * numpy.pi / 2:
self._XDirDump.append(self._XDirDump[-1] + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] - (self._CurrentDist - self._DistDump[-1]) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
else:
self._XDirDump.append(self._XDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.cos(self._Heading)) + self._xyWindSpeed * numpy.cos(self._WindHead) * self.TimeStep2)
self._YDirDump.append(self._YDirDump[-1] + ((self._CurrentDist - self._DistDump[-1]) * numpy.sin(self._Heading)) + self._xyWindSpeed * numpy.sin(self._WindHead) * self.TimeStep2)
# Appends the wind conditions to wind data dumps
self._XWindDump.append(self._xyWindSpeed * numpy.cos(self._WindHead))
self._YWindDump.append(self._xyWindSpeed * numpy.sin(self._WindHead))
# Updates parameters used to obtain the next set of forecasts
self._CurrentLatLon = m2deg(self._YDirDump[-1], self._XDirDump[-1], self.Lat)
self._CurrentLat = self.Lat + self._CurrentLatLon[0]
self._CurrentLon = self.Lon + self._CurrentLatLon[1]
self._CurrentTime += timedelta(seconds=self.TimeStep2)
# Appends useful data to data dumps
self._TimeDump.append(self._TimeDump[-1] + self.TimeStep2)
self._AltDump.append(self._CurrentAlt)
self._DistDump.append(self._CurrentDist)
if abs(self._BaseXDiff - self._XDirDump[-1]) < self.Error and abs(self._BaseYDiff - self._YDirDump[-1]) < self.Error:
self._EndHead = numpy.rad2deg(numpy.arctan2(self._YDirDump[-1], self._XDirDump[-1]))
self.x_coords.append(self._XDirDump[-1])
self.y_coords.append(self._YDirDump[-1])
self.x_coords_end.append(Final_X)
self.y_coords_end.append(Final_Y)
self.set_heading.append(FinalHead)
self.end_heading.append(self._EndHead)
return
logging.info('Current Latitude: %s\nCurrent Longitude: %s\nCurrent Altitude: %s\nCurrent Time: %s', self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
# Check whether the UAV has left the 3 x 3 degree forecast region centred on the launch site (i.e. moved more than 1.5 degrees in latitude or longitude)
if self.Trigger is False:
if abs(self._CurrentLat - self._BaseLat) < 1.5 and abs(self._CurrentLon - self._BaseLon) < 1.5:
# Retrieves changing variables from forecast
self._xyWindSpeed = self.WeatherData[-1].getWindSpeed(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._WindHead = self.WeatherData[-1].getWindDirection(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._CurrentDensity = self.WeatherData[-1].getDensity(self._CurrentLat, self._CurrentLon, self._CurrentAlt, self._CurrentTime)
self._DensityDump.append(self._CurrentDensity)
# Checks if UAV is low enough for calculations to stop
if self._CurrentAlt < 1:
self._EndHead = numpy.rad2deg(numpy.arctan2(self._YDirDump[-1], self._XDirDump[-1]))
self.x_coords_fail.append(self._XDirDump[-1])
self.y_coords_fail.append(self._YDirDump[-1])
self.x_coords_end_fail.append(Final_X)
self.y_coords_end_fail.append(Final_Y)
self.set_heading_fail.append(FinalHead)
self.end_heading_fail.append(self._EndHead)
return
else:
print 'UAV has travelled out of forecast region.'
print 'Assume UAV stays at border of forecast region, hold xyWindSpeed, WindHead, CurrentDensity'
# Hold the last in-region values of _xyWindSpeed, _WindHead and _CurrentDensity
self._DensityDump.append(self._CurrentDensity)
# Checks if UAV is low enough for calculations to stop
if self._CurrentAlt < 1:
self._EndHead = numpy.rad2deg(numpy.arctan2(self._YDirDump[-1], self._XDirDump[-1]))
self.x_coords_fail.append(self._XDirDump[-1])
self.y_coords_fail.append(self._YDirDump[-1])
self.x_coords_end_fail.append(Final_X)
self.y_coords_end_fail.append(Final_Y)
self.set_heading_fail.append(FinalHead)
self.end_heading_fail.append(self._EndHead)
return
else:
if self._CurrentAlt < 1:
self._EndHead = numpy.rad2deg(numpy.arctan2(self._YDirDump[-1], self._XDirDump[-1]))
self.x_coords_fail.append(self._XDirDump[-1])
self.y_coords_fail.append(self._YDirDump[-1])
self.x_coords_end_fail.append(Final_X)
self.y_coords_end_fail.append(Final_Y)
self.set_heading_fail.append(FinalHead)
self.end_heading_fail.append(self._EndHead)
return
else:
pass
def PlotContour(self, Fig1, Fig2, Fig3, Fig4, Fig5):
'''
Plots relevant figures to visualise the simulation process.
Figure 1: Density of Air Against Time
Figure 2: Altitude Against Time
Figure 3: Density of Air Against Altitude
Figure 4: Predicted Flight Path of UAV in 2D
Figure 5: Predicted Flight Path of UAV in 3D
'''
if Fig1 is True:
plt.figure(1)
plt.plot(self._TimeDump, self._DensityDump, 'r--', label='Density')
plt.title('Density of Air Against Time')
plt.xlabel('Time/s')
plt.ylabel('Density of Air/kg/m^3')
plt.legend()
if Fig2 is True:
plt.figure(2)
plt.plot(self._TimeDump, self._AltDump, label='Altitude')
plt.title('Altitude Against Time')
plt.xlabel('Time/s')
plt.ylabel('Altitude/m')
plt.legend()
if Fig3 is True:
plt.figure(3)
plt.plot(self._AltDump, self._DensityDump, 'g', label='Density')
plt.title('Density of Air Against Altitude')
plt.xlabel('Altitude/m')
plt.ylabel('Density of Air/kg/m^3')
plt.legend()
if Fig4 is True:
plt.figure(4)
# plt.plot(self._CurrentDist * numpy.cos(self.FinalHead), self._CurrentDist * numpy.sin(self.FinalHead), 'bo', label='Desired End Point')
plt.plot(0, 0, 'ro', label='Start Point')
plt.plot(self._XDirDump, self._YDirDump, 'b-', label='Flight Path')
plt.plot(self._XDirDump[-1], self._YDirDump[-1], 'go', label='End Point')
plt.title('Predicted Flight Path of UAV')
plt.xlabel('Distance in X Direction/m')
plt.ylabel('Distance in Y Direction/m')
plt.legend()
if Fig5 is True:
fig5 = plt.figure(5)
axes5 = fig5.gca(projection='3d')
axes5.plot([self._CurrentDist * numpy.cos(self.FinalHead)], [self._CurrentDist * numpy.sin(self.FinalHead)], [0], 'bo', label='Desired End Point')
axes5.plot([0], [0], [self.Alt], 'ro', label='Start Point')
axes5.plot(self._XDirDump, self._YDirDump, self._AltDump, label='Flight Path')
axes5.plot([self._XDirDump[-1]], [self._YDirDump[-1]], [self._AltDump[-1]], 'go', label='End Point')
axes5.set_title('Predicted Flight Path of UAV in 3D')
axes5.set_xlabel('Distance in X Direction/m')
axes5.set_ylabel('Distance in Y Direction/m')
axes5.set_zlabel('Distance in Z Direction/m')
axes5.legend()
plt.show()
def Fly_Range(self, Fly, FinalHead, Fig1, Fig2, Fig3, Fig4, Fig5):
"""
Allows the user to run Fly_1/Fly_2/Fly_3 multiple times while capturing the data and writing it to a Python file for further processing.
Inputs: Fly (1, 2 or 3, selecting Fly_1/Fly_2/Fly_3), [Desired Heading/s], Fig1, Fig2, Fig3, Fig4, Fig5
Plots the results of Fly_1/Fly_2/Fly_3 and writes to a Python file named plot_contours_date.py
"""
# Creates list for UAV directions
uav_direction_range = FinalHead
# Creates empty lists to store x and y coordinates and defined heading for UAV
x_coords = []
y_coords = []
x_coords_end = []
y_coords_end = []
set_heading = []
end_heading = []
# Runs for-loop to collect information for contour plot
for x in uav_direction_range:
print 'Calculations started for the UAV headed towards {} degrees'.format(x)
if Fly == 1:
y = self.Fly_1(x)
self.PlotContour(Fig1, Fig2, Fig3, Fig4, Fig5)
x_coords.append(y[0])
y_coords.append(y[1])
x_coords_end.append(y[2])
y_coords_end.append(y[3])
set_heading.append(y[4])
end_heading.append(y[5])
WriteToFile_Dist(self._StartTime, y[0], y[1], y[4], y[5])
if Fly == 2:
y = self.Fly_2(x)
self.PlotContour(Fig1, Fig2, Fig3, Fig4, Fig5)
x_coords.append(y[0])
y_coords.append(y[1])
x_coords_end.append(y[2])
y_coords_end.append(y[3])
set_heading.append(y[4])
end_heading.append(y[5])
WriteToFile_Head1(self._StartTime, y[0], y[1], y[4], y[5])
if Fly == 3:
y = self.Fly_3(x)
self.PlotContour(Fig1, Fig2, Fig3, Fig4, Fig5)
x_coords.append(y[0])
y_coords.append(y[1])
x_coords_end.append(y[2])
y_coords_end.append(y[3])
set_heading.append(y[4])
end_heading.append(y[5])
WriteToFile_Head2(self._StartTime, y[0], y[1], y[4], y[5])
self.Clear()
print 'Calculations completed for the UAV headed towards {} degrees'.format(x)
# Plots collected data
if len(FinalHead) > 1:
plt.plot(x_coords, y_coords, 'bo', label='Displacement Contour')
plt.plot(x_coords_end, y_coords_end, 'go', label='Defined Contour')
plt.plot(0, 0, 'ro', label="Start")
plt.title('UAV Displacement in Y against UAV Displacement in X\n')
plt.xlabel('UAV Displacement in X')
plt.ylabel('UAV Displacement in Y')
plt.legend()
plt.show()
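# Minimal usage sketch (hypothetical call; Fly_1/Fly_2/Fly_3 are defined elsewhere
# in this class, and the heading units are assumed to match what those methods
# expect -- the log messages above report them as degrees):
# sim.Fly_Range(Fly=1, FinalHead=[0, 45, 90], Fig1=False, Fig2=True,
# Fig3=False, Fig4=True, Fig5=False)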
def Fly_Range_2(self, StartAngle, EndAngle, StartDist, EndDist, DistDiff, Error):
"""
Allows the user to run Fly_4 multiple times while capturing the data and writing it to a Python file for further processing.
Inputs: Start Angle, End Angle, First Set of Distances to be Reached, Last Set of Distances to be Reached, Steps between Distances, Error allowed for both X and Y Directions
Plots the results of Fly_4 and writes to a Python file named plot_contours_date_error.py
"""
# Creates list for UAV directions
uav_direction_range = numpy.arange(StartAngle, EndAngle + 1, 1)
# Runs for-loop to collect information for contour plot
for x in uav_direction_range:
print 'Calculations started for the UAV headed towards {} degrees'.format(x)
x_1 = numpy.deg2rad(x)
# Generates waypoints for the UAV to fly to
self.Generate_XY(x_1, StartDist, EndDist + 1, DistDiff)
for z in self.xy:
self.Fly_4(z[0], z[1], Error, x)
# Saves the furthest successful flight
if len(self.x_coords) > 1:
if abs(self.x_coords[-1] ** 2 + self.y_coords[-1] ** 2) > abs(self.x_coords[-2] ** 2 + self.y_coords[-2] ** 2):
self.x_coords_angle_max, self.y_coords_angle_max = self.x_coords[-1], self.y_coords[-1]
self.Clear()
# Appends the furthest successful flight of the set angle and clear variables for next angle
try:
self.x_coords_max.append(self.x_coords_angle_max)
self.y_coords_max.append(self.y_coords_angle_max)
self.x_coords_angle_max, self.y_coords_angle_max = None, None
except:
pass
print 'Calculations completed for the UAV headed towards {} degrees'.format(x)
WriteToFile_Local(self._StartTime, self.x_coords, self.y_coords, self.x_coords_max, self.y_coords_max, self.x_coords_end, self.y_coords_end, self.set_heading, self.end_heading, self.x_coords_fail, self.y_coords_fail, self.x_coords_end_fail, self.y_coords_end_fail, self.set_heading_fail, self.end_heading_fail, self.Error)
# Plots collected data
plt.plot(self.x_coords, self.y_coords, 'bo', label='Displacements')
plt.plot(self.x_coords_max, self.y_coords_max, 'yo', linestyle='-', label='Flight Contour')
if len(self.x_coords) > 1:
plt.fill(self.x_coords_max, self.y_coords_max, 'y', alpha=0.5)
plt.plot(self.x_coords_end, self.y_coords_end, 'go', label='Defined Contour')
plt.plot(0, 0, 'ro', label="Start")
plt.title('Flight Contour\n')
plt.xlabel('UAV Displacement in X')
plt.ylabel('UAV Displacement in Y')
plt.legend()
plt.show()
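# Minimal usage sketch (hypothetical values): sweep headings 0-359 degrees,
# targeting waypoints 1000 m to 5000 m away in 500 m steps, allowing a 50 m
# error in each of X and Y:
# sim.Fly_Range_2(StartAngle=0, EndAngle=359, StartDist=1000, EndDist=5000,
# DistDiff=500, Error=50)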
def Generate_XY(self, FinalHead, StartDist, EndDist, DistDiff):
'''
Generates the distances for the UAV to fly to for Fly_Range_2
'''
x = numpy.arange(StartDist, EndDist, DistDiff)
x = [z * numpy.cos(FinalHead) for z in x]
y = numpy.arange(StartDist, EndDist, DistDiff)
y = [z * numpy.sin(FinalHead) for z in y]
self.xy = zip(x, y)
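# For example (illustrative values), Generate_XY(numpy.deg2rad(90.0), 1000, 3001, 1000)
# sets self.xy to roughly [(0.0, 1000.0), (0.0, 2000.0), (0.0, 3000.0)]:
# waypoints spaced DistDiff apart along the requested heading, which
# Fly_Range_2 then feeds to Fly_4 one at a time.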
| gpl-2.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_toolkits/axes_grid1/parasite_axes.py | 6 | 15477 | import warnings
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
import matplotlib.transforms as mtransforms
import matplotlib.collections as mcoll
import matplotlib.legend as mlegend
from matplotlib.axes import subplot_class_factory
from mpl_axes import Axes
from matplotlib.transforms import Bbox
import numpy as np
import matplotlib.cbook as cbook
is_string_like = cbook.is_string_like
class ParasiteAxesBase:
def get_images_artists(self):
artists = set([a for a in self.get_children() if a.get_visible()])
images = set([a for a in self.images if a.get_visible()])
return list(images), list(artists - images)
def __init__(self, parent_axes, **kargs):
self._parent_axes = parent_axes
kargs.update(dict(frameon=False))
self._get_base_axes_attr("__init__")(self, parent_axes.figure,
parent_axes._position, **kargs)
def cla(self):
self._get_base_axes_attr("cla")(self)
martist.setp(self.get_children(), visible=False)
self._get_lines = self._parent_axes._get_lines
# In mpl's Axes, zorders of x- and y-axis are originally set
# within Axes.draw().
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
_parasite_axes_classes = {}
def parasite_axes_class_factory(axes_class=None):
if axes_class is None:
axes_class = Axes
new_class = _parasite_axes_classes.get(axes_class)
if new_class is None:
def _get_base_axes_attr(self, attrname):
return getattr(axes_class, attrname)
new_class = type("%sParasite" % (axes_class.__name__),
(ParasiteAxesBase, axes_class),
{'_get_base_axes_attr': _get_base_axes_attr})
_parasite_axes_classes[axes_class] = new_class
return new_class
ParasiteAxes = parasite_axes_class_factory()
# #class ParasiteAxes(ParasiteAxesBase, Axes):
# @classmethod
# def _get_base_axes_attr(cls, attrname):
# return getattr(Axes, attrname)
class ParasiteAxesAuxTransBase:
def __init__(self, parent_axes, aux_transform, viewlim_mode=None,
**kwargs):
self.transAux = aux_transform
self.set_viewlim_mode(viewlim_mode)
self._parasite_axes_class.__init__(self, parent_axes, **kwargs)
def _set_lim_and_transforms(self):
self.transAxes = self._parent_axes.transAxes
self.transData = \
self.transAux + \
self._parent_axes.transData
self._xaxis_transform = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
def set_viewlim_mode(self, mode):
if mode not in [None, "equal", "transform"]:
raise ValueError("Unknown mode : %s" % (mode,))
else:
self._viewlim_mode = mode
def get_viewlim_mode(self):
return self._viewlim_mode
def update_viewlim(self):
viewlim = self._parent_axes.viewLim.frozen()
mode = self.get_viewlim_mode()
if mode is None:
pass
elif mode == "equal":
self.axes.viewLim.set(viewlim)
elif mode == "transform":
self.axes.viewLim.set(viewlim.transformed(self.transAux.inverted()))
else:
raise ValueError("Unknown mode : %s" % (self._viewlim_mode,))
def _pcolor(self, method_name, *XYC, **kwargs):
if len(XYC) == 1:
C = XYC[0]
ny, nx = C.shape
gx = np.arange(-0.5, nx, 1.)
gy = np.arange(-0.5, ny, 1.)
X, Y = np.meshgrid(gx, gy)
else:
X, Y, C = XYC
pcolor_routine = self._get_base_axes_attr(method_name)
if kwargs.has_key("transform"):
mesh = pcolor_routine(self, X, Y, C, **kwargs)
else:
orig_shape = X.shape
xy = np.vstack([X.flat, Y.flat])
xyt=xy.transpose()
wxy = self.transAux.transform(xyt)
gx, gy = wxy[:,0].reshape(orig_shape), wxy[:,1].reshape(orig_shape)
mesh = pcolor_routine(self, gx, gy, C, **kwargs)
mesh.set_transform(self._parent_axes.transData)
return mesh
def pcolormesh(self, *XYC, **kwargs):
return self._pcolor("pcolormesh", *XYC, **kwargs)
def pcolor(self, *XYC, **kwargs):
return self._pcolor("pcolor", *XYC, **kwargs)
def _contour(self, method_name, *XYCL, **kwargs):
if len(XYCL) <= 2:
C = XYCL[0]
ny, nx = C.shape
gx = np.arange(0., nx, 1.)
gy = np.arange(0., ny, 1.)
X,Y = np.meshgrid(gx, gy)
CL = XYCL
else:
X, Y = XYCL[:2]
CL = XYCL[2:]
contour_routine = self._get_base_axes_attr(method_name)
if kwargs.has_key("transform"):
cont = contour_routine(self, X, Y, *CL, **kwargs)
else:
orig_shape = X.shape
xy = np.vstack([X.flat, Y.flat])
xyt=xy.transpose()
wxy = self.transAux.transform(xyt)
gx, gy = wxy[:,0].reshape(orig_shape), wxy[:,1].reshape(orig_shape)
cont = contour_routine(self, gx, gy, *CL, **kwargs)
for c in cont.collections:
c.set_transform(self._parent_axes.transData)
return cont
def contour(self, *XYCL, **kwargs):
return self._contour("contour", *XYCL, **kwargs)
def contourf(self, *XYCL, **kwargs):
return self._contour("contourf", *XYCL, **kwargs)
def apply_aspect(self, position=None):
self.update_viewlim()
self._get_base_axes_attr("apply_aspect")(self)
#ParasiteAxes.apply_aspect()
_parasite_axes_auxtrans_classes = {}
def parasite_axes_auxtrans_class_factory(axes_class=None):
if axes_class is None:
parasite_axes_class = ParasiteAxes
elif not issubclass(axes_class, ParasiteAxesBase):
parasite_axes_class = parasite_axes_class_factory(axes_class)
else:
parasite_axes_class = axes_class
new_class = _parasite_axes_auxtrans_classes.get(parasite_axes_class)
if new_class is None:
new_class = type("%sParasiteAuxTrans" % (parasite_axes_class.__name__),
(ParasiteAxesAuxTransBase, parasite_axes_class),
{'_parasite_axes_class': parasite_axes_class,
'name': 'parasite_axes'})
_parasite_axes_auxtrans_classes[parasite_axes_class] = new_class
return new_class
ParasiteAxesAuxTrans = parasite_axes_auxtrans_class_factory(axes_class=ParasiteAxes)
def _get_handles(ax):
handles = ax.lines[:]
handles.extend(ax.patches)
handles.extend([c for c in ax.collections
if isinstance(c, mcoll.LineCollection)])
handles.extend([c for c in ax.collections
if isinstance(c, mcoll.RegularPolyCollection)])
handles.extend([c for c in ax.collections
if isinstance(c, mcoll.CircleCollection)])
return handles
class HostAxesBase:
def __init__(self, *args, **kwargs):
self.parasites = []
self._get_base_axes_attr("__init__")(self, *args, **kwargs)
def get_aux_axes(self, tr, viewlim_mode="equal", axes_class=None):
parasite_axes_class = parasite_axes_auxtrans_class_factory(axes_class)
ax2 = parasite_axes_class(self, tr, viewlim_mode)
# note that ax2.transData == tr + ax1.transData
# Anything you draw in ax2 will match the ticks and grids of ax1.
self.parasites.append(ax2)
return ax2
def _get_legend_handles(self, legend_handler_map=None):
Axes_get_legend_handles = self._get_base_axes_attr("_get_legend_handles")
all_handles = Axes_get_legend_handles(self, legend_handler_map)
for ax in self.parasites:
all_handles.extend(ax._get_legend_handles(legend_handler_map))
return all_handles
def draw(self, renderer):
orig_artists = list(self.artists)
orig_images = list(self.images)
if hasattr(self, "get_axes_locator"):
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.set_position(pos, which="active")
self.apply_aspect(pos)
else:
self.apply_aspect()
else:
self.apply_aspect()
rect = self.get_position()
for ax in self.parasites:
ax.apply_aspect(rect)
images, artists = ax.get_images_artists()
self.images.extend(images)
self.artists.extend(artists)
self._get_base_axes_attr("draw")(self, renderer)
self.artists = orig_artists
self.images = orig_images
def cla(self):
for ax in self.parasites:
ax.cla()
self._get_base_axes_attr("cla")(self)
#super(HostAxes, self).cla()
def twinx(self, axes_class=None):
"""
call signature::
ax2 = ax.twinx()
create a twin of Axes for generating a plot with a shared
x-axis but independent y axis. The y-axis of self will have
ticks on left and the returned axes will have ticks on the
right
"""
if axes_class is None:
axes_class = self._get_base_axes()
parasite_axes_class = parasite_axes_class_factory(axes_class)
ax2 = parasite_axes_class(self, sharex=self, frameon=False)
self.parasites.append(ax2)
# for normal axes
self.axis["right"].toggle(all=False)
self.axis["right"].line.set_visible(True)
ax2.axis["right"].set_visible(True)
ax2.axis["left","top", "bottom"].toggle(all=False)
ax2.axis["left","top", "bottom"].line.set_visible(False)
ax2.axis["right"].toggle(all=True)
ax2.axis["right"].line.set_visible(False)
return ax2
def twiny(self, axes_class=None):
"""
call signature::
ax2 = ax.twiny()
create a twin of Axes for generating a plot with a shared
y-axis but independent x axis. The x-axis of self will have
ticks on bottom and the returned axes will have ticks on the
top
"""
if axes_class is None:
axes_class = self._get_base_axes()
parasite_axes_class = parasite_axes_class_factory(axes_class)
ax2 = parasite_axes_class(self, sharey=self, frameon=False)
self.parasites.append(ax2)
self.axis["top"].toggle(all=False)
self.axis["top"].line.set_visible(True)
ax2.axis["top"].set_visible(True)
ax2.axis["left","right", "bottom"].toggle(all=False)
ax2.axis["left","right", "bottom"].line.set_visible(False)
ax2.axis["top"].toggle(all=True)
ax2.axis["top"].line.set_visible(False)
return ax2
def twin(self, aux_trans=None, axes_class=None):
"""
call signature::
ax2 = ax.twin()
create a twin of Axes for generating a plot with a shared
x-axis but independent y axis. The y-axis of self will have
ticks on left and the returned axes will have ticks on the
right
"""
if axes_class is None:
axes_class = self._get_base_axes()
parasite_axes_auxtrans_class = parasite_axes_auxtrans_class_factory(axes_class)
if aux_trans is None:
ax2 = parasite_axes_auxtrans_class(self, mtransforms.IdentityTransform(),
viewlim_mode="equal",
)
else:
ax2 = parasite_axes_auxtrans_class(self, aux_trans,
viewlim_mode="transform",
)
self.parasites.append(ax2)
# for normal axes
#self.yaxis.tick_left()
#self.xaxis.tick_bottom()
#ax2.yaxis.tick_right()
#ax2.xaxis.set_visible(True)
#ax2.yaxis.set_visible(True)
#ax2.yaxis.set_label_position('right')
##ax2.xaxis.tick_top()
#ax2.xaxis.set_label_position('top')
self.axis["top","right"].toggle(all=False)
self.axis["top","right"].line.set_visible(False)
#self.axis["left","bottom"].toggle(label=True)
ax2.axis["top","right"].set_visible(True)
ax2.axis["bottom","left"].toggle(all=False)
ax2.axis["bottom","left"].line.set_visible(False)
ax2.axis["top","right"].toggle(all=True)
ax2.axis["top","right"].line.set_visible(True)
# # for axisline axes
# self._axislines["right"].set_visible(False)
# self._axislines["top"].set_visible(False)
# ax2._axislines["left"].set_visible(False)
# ax2._axislines["bottom"].set_visible(False)
# ax2._axislines["right"].set_visible(True)
# ax2._axislines["top"].set_visible(True)
# ax2._axislines["right"].major_ticklabels.set_visible(True)
# ax2._axislines["top"].major_ticklabels.set_visible(True)
return ax2
def get_tightbbox(self, renderer, call_axes_locator=True):
bbs = [ax.get_tightbbox(renderer, call_axes_locator) \
for ax in self.parasites]
get_tightbbox = self._get_base_axes_attr("get_tightbbox")
bbs.append(get_tightbbox(self, renderer, call_axes_locator))
_bbox = Bbox.union([b for b in bbs if b.width!=0 or b.height!=0])
return _bbox
_host_axes_classes = {}
def host_axes_class_factory(axes_class=None):
if axes_class is None:
axes_class = Axes
new_class = _host_axes_classes.get(axes_class)
if new_class is None:
def _get_base_axes(self):
return axes_class
def _get_base_axes_attr(self, attrname):
return getattr(axes_class, attrname)
new_class = type("%sHostAxes" % (axes_class.__name__),
(HostAxesBase, axes_class),
{'_get_base_axes_attr': _get_base_axes_attr,
'_get_base_axes': _get_base_axes})
_host_axes_classes[axes_class] = new_class
return new_class
def host_subplot_class_factory(axes_class):
host_axes_class = host_axes_class_factory(axes_class=axes_class)
subplot_host_class = subplot_class_factory(host_axes_class)
return subplot_host_class
HostAxes = host_axes_class_factory(axes_class=Axes)
SubplotHost = subplot_class_factory(HostAxes)
def host_axes(*args, **kwargs):
import matplotlib.pyplot as plt
axes_class = kwargs.pop("axes_class", None)
host_axes_class = host_axes_class_factory(axes_class)
fig = plt.gcf()
ax = host_axes_class(fig, *args, **kwargs)
fig.add_axes(ax)
plt.draw_if_interactive()
return ax
def host_subplot(*args, **kwargs):
import matplotlib.pyplot as plt
axes_class = kwargs.pop("axes_class", None)
host_subplot_class = host_subplot_class_factory(axes_class)
fig = plt.gcf()
ax = host_subplot_class(fig, *args, **kwargs)
fig.add_subplot(ax)
plt.draw_if_interactive()
return ax
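# Minimal usage sketch (illustrative; assumes an interactive matplotlib backend):
# from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
# import matplotlib.pyplot as plt
# host = host_subplot(111) # SubplotHost instance
# par = host.twinx() # parasite axes sharing the x-axis
# host.plot([0, 1, 2], [0, 1, 2], label="host")
# par.plot([0, 1, 2], [0, 3, 2], label="parasite")
# host.legend()
# plt.show()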
| mit |
elijah513/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
wkfwkf/statsmodels | statsmodels/tools/tools.py | 25 | 18536 | '''
Utility functions for models code
'''
from statsmodels.compat.python import reduce, lzip, lmap, asstr2, range
import numpy as np
import numpy.lib.recfunctions as nprf
import numpy.linalg as L
from scipy.linalg import svdvals
from statsmodels.datasets import webuse
from statsmodels.tools.data import _is_using_pandas
from statsmodels.compat.numpy import np_matrix_rank
from pandas import DataFrame
def _make_dictnames(tmp_arr, offset=0):
"""
Helper function to create a dictionary mapping a column number
to the name in tmp_arr.
"""
col_map = {}
for i, col_name in enumerate(tmp_arr):
col_map.update({i+offset : col_name})
return col_map
def drop_missing(Y, X=None, axis=1):
"""
Returns views on the arrays Y and X where missing observations are dropped.
Parameters
----------
Y : array-like
X : array-like, optional
axis : int
Axis along which to look for missing observations. Default is 1, ie.,
observations in rows.
Returns
-------
Y : array
All observations of Y where neither Y nor X contain missing values.
X : array
Returned only when X is not None, filtered in the same way as Y.
Notes
-----
If either Y or X is 1d, it is reshaped to be 2d.
"""
Y = np.asarray(Y)
if Y.ndim == 1:
Y = Y[:, None]
if X is not None:
X = np.array(X)
if X.ndim == 1:
X = X[:, None]
keepidx = np.logical_and(~np.isnan(Y).any(axis),
~np.isnan(X).any(axis))
return Y[keepidx], X[keepidx]
else:
keepidx = ~np.isnan(Y).any(axis)
return Y[keepidx]
# TODO: needs to better preserve dtype and be more flexible
# ie., if you still have a string variable in your array you don't
# want to cast it to float
# TODO: add name validator (ie., bad names for datasets.grunfeld)
def categorical(data, col=None, dictnames=False, drop=False):
'''
Returns a dummy matrix given an array of categorical variables.
Parameters
----------
data : array
A structured array, recarray, or array. This can be either
a 1d vector of the categorical variable or a 2d array with
the column specifying the categorical variable specified by the col
argument.
col : 'string', int, or None
If data is a structured array or a recarray, `col` can be a string
that is the name of the column that contains the variable. For all
arrays `col` can be an int that is the (zero-based) column index
number. `col` can only be None for a 1d array. The default is None.
dictnames : bool, optional
If True, a dictionary mapping the column number to the categorical
name is returned. Used to have information about plain arrays.
drop : bool
Whether or not keep the categorical variable in the returned matrix.
Returns
--------
dummy_matrix, [dictnames, optional]
A matrix of dummy (indicator/binary) float variables for the
categorical data. If dictnames is True, then the dictionary
is returned as well.
Notes
-----
This returns a dummy variable for EVERY distinct category. If a
structured array or recarray is provided, the name of each new variable is the
old variable name - underscore - category name. So if the variable
'vote' had answers 'yes' or 'no', then the returned array would have two
new variables -- 'vote_yes' and 'vote_no'. There is currently
no name checking.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
Univariate examples
>>> import string
>>> string_var = [string.lowercase[0:5], string.lowercase[5:10], \
string.lowercase[10:15], string.lowercase[15:20], \
string.lowercase[20:25]]
>>> string_var *= 5
>>> string_var = np.asarray(sorted(string_var))
>>> design = sm.tools.categorical(string_var, drop=True)
Or for a numerical categorical variable
>>> instr = np.floor(np.arange(10,60, step=2)/10)
>>> design = sm.tools.categorical(instr, drop=True)
With a structured array
>>> num = np.random.randn(25,2)
>>> struct_ar = np.zeros((25,1), dtype=[('var1', 'f4'),('var2', 'f4'), \
('instrument','f4'),('str_instr','a5')])
>>> struct_ar['var1'] = num[:,0][:,None]
>>> struct_ar['var2'] = num[:,1][:,None]
>>> struct_ar['instrument'] = instr[:,None]
>>> struct_ar['str_instr'] = string_var[:,None]
>>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)
Or
>>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True)
'''
if isinstance(col, (list, tuple)):
try:
assert len(col) == 1
col = col[0]
except:
raise ValueError("Can only convert one column at a time")
# TODO: add a NameValidator function
# catch recarrays and structured arrays
if data.dtype.names or data.__class__ is np.recarray:
if not col and np.squeeze(data).ndim > 1:
raise IndexError("col is None and the input array is not 1d")
if isinstance(col, int):
col = data.dtype.names[col]
if col is None and data.dtype.names and len(data.dtype.names) == 1:
col = data.dtype.names[0]
tmp_arr = np.unique(data[col])
# if the cols are shape (#,) vs (#,1) need to add an axis and flip
_swap = True
if data[col].ndim == 1:
tmp_arr = tmp_arr[:, None]
_swap = False
tmp_dummy = (tmp_arr == data[col]).astype(float)
if _swap:
tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
if not tmp_arr.dtype.names: # how do we get to this code path?
tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr)]
elif tmp_arr.dtype.names:
tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr.tolist())]
# prepend the varname and underscore, if col is numeric attribute
# lookup is lost for recarrays...
if col is None:
try:
col = data.dtype.names[0]
except:
col = 'var'
# TODO: the above needs to be made robust because there could be many
# var_yes, var_no variables for instance.
tmp_arr = [col + '_' + item for item in tmp_arr]
# TODO: test this for rec and structured arrays!!!
if drop is True:
if len(data.dtype) <= 1:
if tmp_dummy.shape[0] < tmp_dummy.shape[1]:
tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
dt = lzip(tmp_arr, [tmp_dummy.dtype.str]*len(tmp_arr))
# preserve array type
return np.array(lmap(tuple, tmp_dummy.tolist()),
dtype=dt).view(type(data))
data = nprf.drop_fields(data, col, usemask=False,
asrecarray=type(data) is np.recarray)
data = nprf.append_fields(data, tmp_arr, data=tmp_dummy,
usemask=False,
asrecarray=type(data) is np.recarray)
return data
# handle ndarrays and catch array-like for an error
elif data.__class__ is np.ndarray or not isinstance(data, np.ndarray):
if not isinstance(data, np.ndarray):
raise NotImplementedError("Array-like objects are not supported")
if isinstance(col, int):
offset = data.shape[1] # need error catching here?
tmp_arr = np.unique(data[:, col])
tmp_dummy = (tmp_arr[:, np.newaxis] == data[:, col]).astype(float)
tmp_dummy = tmp_dummy.swapaxes(1, 0)
if drop is True:
offset -= 1
data = np.delete(data, col, axis=1).astype(float)
data = np.column_stack((data, tmp_dummy))
if dictnames is True:
col_map = _make_dictnames(tmp_arr, offset)
return data, col_map
return data
elif col is None and np.squeeze(data).ndim == 1:
tmp_arr = np.unique(data)
tmp_dummy = (tmp_arr[:, None] == data).astype(float)
tmp_dummy = tmp_dummy.swapaxes(1, 0)
if drop is True:
if dictnames is True:
col_map = _make_dictnames(tmp_arr)
return tmp_dummy, col_map
return tmp_dummy
else:
data = np.column_stack((data, tmp_dummy))
if dictnames is True:
col_map = _make_dictnames(tmp_arr, offset=1)
return data, col_map
return data
else:
raise IndexError("The index %s is not understood" % col)
def _series_add_constant(data, prepend, has_constant):
const = np.ones_like(data)
if data.var() == 0:
if has_constant == 'raise':
raise ValueError("data already contains a constant.")
elif has_constant == 'skip':
return data
elif has_constant == 'add':
pass
else:
raise ValueError("Option {0} not understood for "
"has_constant.".format(has_constant))
if not prepend:
columns = [data.name, 'const']
else:
columns = ['const', data.name]
results = DataFrame({data.name : data, 'const' : const}, columns=columns)
return results
def _dataframe_add_constant(data, prepend, has_constant):
# check for const.
if np.any(data.var(0) == 0):
if has_constant == 'raise':
raise ValueError("data already contains a constant.")
elif has_constant == 'skip':
return data
elif has_constant == 'add':
pass
else:
raise ValueError("Option {0} not understood for "
"has_constant.".format(has_constant))
if prepend:
data.insert(0, 'const', 1)
else:
data['const'] = 1
return data
def _pandas_add_constant(data, prepend, has_constant):
from pandas import Series
if isinstance(data, Series):
return _series_add_constant(data, prepend, has_constant)
else:
return _dataframe_add_constant(data, prepend, has_constant)
# TODO: add an axis argument to this for sysreg
def add_constant(data, prepend=True, has_constant='skip'):
"""
Adds a column of ones to an array; the column is prepended when `prepend` is True (the default) and appended otherwise.
Parameters
----------
data : array-like
`data` is the column-ordered design matrix
prepend : bool
True and the constant is prepended rather than appended.
has_constant : str {'raise', 'add', 'skip'}
Behavior if `data` already has a constant. The default will return
data without adding another constant. If 'raise', will raise an
error if a constant is present. Using 'add' will duplicate the
constant, if one is present. Has no effect for structured or
recarrays. There is no checking for a constant in this case.
Returns
-------
data : array
The original array with a constant (column of ones) as the first or
last column.
"""
if _is_using_pandas(data, None):
# work on a copy
return _pandas_add_constant(data.copy(), prepend, has_constant)
else:
data = np.asarray(data)
if not data.dtype.names:
var0 = data.var(0) == 0
if np.any(var0):
if has_constant == 'raise':
raise ValueError("data already contains a constant.")
elif has_constant == 'skip':
return data
elif has_constant == 'add':
pass
else:
raise ValueError("Option {0} not understood for "
"has_constant.".format(has_constant))
data = np.column_stack((data, np.ones((data.shape[0], 1))))
if prepend:
return np.roll(data, 1, 1)
else:
return_rec = data.__class__ is np.recarray
if prepend:
ones = np.ones((data.shape[0], 1), dtype=[('const', float)])
data = nprf.append_fields(ones, data.dtype.names,
[data[i] for i in data.dtype.names],
usemask=False, asrecarray=return_rec)
else:
data = nprf.append_fields(data, 'const', np.ones(data.shape[0]),
usemask=False, asrecarray=return_rec)
return data
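# Example (illustrative; output shown for the default prepend=True):
# >>> import numpy as np
# >>> X = np.arange(6).reshape(3, 2)
# >>> add_constant(X)
# array([[ 1., 0., 1.],
# [ 1., 2., 3.],
# [ 1., 4., 5.]])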
def isestimable(C, D):
""" True if (Q, P) contrast `C` is estimable for (N, P) design `D`
From an Q x P contrast matrix `C` and an N x P design matrix `D`, checks if
the contrast `C` is estimable by looking at the rank of ``vstack([C,D])``
and verifying it is the same as the rank of `D`.
Parameters
----------
C : (Q, P) array-like
contrast matrix. If `C` is 1 dimensional, assume shape (1, P)
D: (N, P) array-like
design matrix
Returns
-------
tf : bool
True if the contrast `C` is estimable on design `D`
Examples
--------
>>> D = np.array([[1, 1, 1, 0, 0, 0],
... [0, 0, 0, 1, 1, 1],
... [1, 1, 1, 1, 1, 1]]).T
>>> isestimable([1, 0, 0], D)
False
>>> isestimable([1, -1, 0], D)
True
"""
C = np.asarray(C)
D = np.asarray(D)
if C.ndim == 1:
C = C[None, :]
if C.shape[1] != D.shape[1]:
raise ValueError('Contrast should have %d columns' % D.shape[1])
new = np.vstack([C, D])
if np_matrix_rank(new) != np_matrix_rank(D):
return False
return True
def pinv_extended(X, rcond=1e-15):
"""
Return the pinv of an array X as well as the singular values
used in computation.
Code adapted from numpy.
"""
X = np.asarray(X)
X = X.conjugate()
u, s, vt = np.linalg.svd(X, 0)
s_orig = np.copy(s)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond * np.maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = np.dot(np.transpose(vt), np.multiply(s[:, np.core.newaxis],
np.transpose(u)))
return res, s_orig
def recipr(X):
"""
Return the reciprocal of an array, setting all entries less than or
equal to 0 to 0. Therefore, it presumes that X should be positive in
general.
"""
x = np.maximum(np.asarray(X).astype(np.float64), 0)
return np.greater(x, 0.) / (x + np.less_equal(x, 0.))
def recipr0(X):
"""
Return the reciprocal of an array, setting all entries equal to 0
as 0. It does not assume that X should be positive in
general.
"""
test = np.equal(np.asarray(X), 0)
return np.where(test, 0, 1. / X)
def clean0(matrix):
"""
Erase columns of zeros: can save some time in pseudoinverse.
"""
colsum = np.add.reduce(matrix**2, 0)
val = [matrix[:, i] for i in np.flatnonzero(colsum)]
return np.array(np.transpose(val))
def rank(X, cond=1.0e-12):
"""
Return the rank of a matrix X based on its generalized inverse,
not the SVD.
"""
from warnings import warn
warn("rank is deprecated and will be removed in 0.7."
" Use np.linalg.matrix_rank instead.", FutureWarning)
X = np.asarray(X)
if len(X.shape) == 2:
D = svdvals(X)
return int(np.add.reduce(np.greater(D / D.max(),
cond).astype(np.int32)))
else:
return int(not np.alltrue(np.equal(X, 0.)))
def fullrank(X, r=None):
"""
Return a matrix whose column span is the same as X.
If the rank of X is known it can be specified as r -- no check
is made to ensure that this really is the rank of X.
"""
if r is None:
r = np_matrix_rank(X)
V, D, U = L.svd(X, full_matrices=0)
order = np.argsort(D)
order = order[::-1]
value = []
for i in range(r):
value.append(V[:, order[i]])
return np.asarray(np.transpose(value)).astype(np.float64)
def unsqueeze(data, axis, oldshape):
"""
Unsqueeze a collapsed array
>>> from numpy import mean
>>> from numpy.random import standard_normal
>>> x = standard_normal((3,4,5))
>>> m = mean(x, axis=1)
>>> m.shape
(3, 5)
>>> m = unsqueeze(m, 1, x.shape)
>>> m.shape
(3, 1, 5)
>>>
"""
newshape = list(oldshape)
newshape[axis] = 1
return data.reshape(newshape)
def chain_dot(*arrs):
"""
Returns the dot product of the given matrices.
Parameters
----------
arrs: argument list of ndarray
Returns
-------
Dot product of all arguments.
Examples
--------
>>> import numpy as np
>>> from statsmodels.tools import chain_dot
>>> A = np.arange(1,13).reshape(3,4)
>>> B = np.arange(3,15).reshape(4,3)
>>> C = np.arange(5,8).reshape(3,1)
>>> chain_dot(A,B,C)
array([[1820],
[4300],
[6780]])
"""
return reduce(lambda x, y: np.dot(y, x), arrs[::-1])
def nan_dot(A, B):
"""
Returns np.dot(left_matrix, right_matrix) with the convention that
nan * 0 = 0 and nan * x = nan if x != 0.
Parameters
----------
A, B : np.ndarrays
"""
# Find out who should be nan due to nan * nonzero
should_be_nan_1 = np.dot(np.isnan(A), (B != 0))
should_be_nan_2 = np.dot((A != 0), np.isnan(B))
should_be_nan = should_be_nan_1 + should_be_nan_2
# Multiply after setting all nan to 0
# This is what happens if there were no nan * nonzero conflicts
C = np.dot(np.nan_to_num(A), np.nan_to_num(B))
C[should_be_nan] = np.nan
return C
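# Example of the convention (illustrative):
# >>> A = np.array([[np.nan, 0.], [1., 2.]])
# >>> B = np.eye(2)
# >>> nan_dot(A, B)
# array([[ nan, 0.],
# [ 1., 2.]])
# The nan propagates only where it multiplies a nonzero entry of B; nan * 0
# contributes 0, so the (0, 1) entry stays finite.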
def maybe_unwrap_results(results):
"""
Gets raw results back from wrapped results.
Can be used in plotting functions or other post-estimation type
routines.
"""
return getattr(results, '_results', results)
class Bunch(dict):
"""
Returns a dict-like object with keys accessible via attribute lookup.
"""
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
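# Example (illustrative):
# >>> b = Bunch(x=1, y=2)
# >>> b.x, b['y']
# (1, 2)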
webuse = np.deprecate(webuse,
old_name='statsmodels.tools.tools.webuse',
new_name='statsmodels.datasets.webuse',
message='webuse will be removed from the tools '
'namespace in the 0.7.0 release. Please use the'
' new import.')
| bsd-3-clause |
davestanley/compnet-email-classifier | unused_code/svm_example.py | 1 | 1741 | # -*- coding: utf-8 -*-
"""
Created on Fri May 9 23:25:43 2014
@author: davestanley
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, Y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, Y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, Y)
lin_svc = svm.LinearSVC(C=C).fit(X, Y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel',
'LinearSVC (linear kernel)']
for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
pl.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.contourf(xx, yy, Z, cmap=pl.cm.Paired)
pl.axis('off')
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.title(titles[i])
pl.show() | gpl-2.0 |
fukatani/stacked_generalization | stacked_generalization/example/joblibed_classification.py | 1 | 1166 | from sklearn import datasets
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.validation import check_random_state
from stacked_generalization.lib.joblibed import JoblibedClassifier
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# Joblibed model
rf = RandomForestClassifier(n_estimators=40,
criterion='gini',
random_state=1)
clf = JoblibedClassifier(rf, "rf")
train_idx, test_idx = list(StratifiedKFold(3).split(iris.data, iris.target))[0]
xs_train = iris.data[train_idx]
y_train = iris.target[train_idx]
xs_test = iris.data[test_idx]
y_test = iris.target[test_idx]
print("First fit and prediction (not cached).")
clf.fit(xs_train, y_train, train_idx)
score = clf.score(xs_test, y_test, test_idx)
print('Classifier score: {0}'.format(score))
print("Second fit and prediction (load cache).")
clf.fit(xs_train, y_train, train_idx)
score = clf.score(xs_test, y_test, test_idx)
print('Classifier score: {0}'.format(score))
| apache-2.0 |
appapantula/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| mit |
jreback/pandas | pandas/tests/io/json/test_readlines.py | 1 | 9321 | from io import StringIO
from pathlib import Path
import pytest
import pandas as pd
from pandas import DataFrame, read_json
import pandas._testing as tm
from pandas.io.json._json import JsonReader
@pytest.fixture
def lines_json_df():
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
return df.to_json(lines=True, orient="records")
def test_read_jsonl():
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars():
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_to_jsonl():
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n'
assert result == expected
tm.assert_frame_equal(read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n'
assert result == expected
tm.assert_frame_equal(read_json(result, lines=True), df)
def test_to_jsonl_count_new_lines():
# GH36888
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
actual_new_lines_count = df.to_json(orient="records", lines=True).count("\n")
expected_new_lines_count = 2
assert actual_new_lines_count == expected_new_lines_count
@pytest.mark.parametrize("chunksize", [1, 1.0])
def test_readjson_chunks(lines_json_df, chunksize):
# Basic test that read_json(chunks=True) gives the same result as
# read_json(chunks=False)
# GH17048: memory usage when lines=True
unchunked = read_json(StringIO(lines_json_df), lines=True)
with read_json(StringIO(lines_json_df), lines=True, chunksize=chunksize) as reader:
chunked = pd.concat(reader)
tm.assert_frame_equal(chunked, unchunked)
def test_readjson_chunksize_requires_lines(lines_json_df):
msg = "chunksize can only be passed if lines=True"
with pytest.raises(ValueError, match=msg):
with pd.read_json(StringIO(lines_json_df), lines=False, chunksize=2) as _:
pass
def test_readjson_chunks_series():
# Test reading line-format JSON to Series with chunksize param
s = pd.Series({"A": 1, "B": 2})
strio = StringIO(s.to_json(lines=True, orient="records"))
unchunked = pd.read_json(strio, lines=True, typ="Series")
strio = StringIO(s.to_json(lines=True, orient="records"))
with pd.read_json(strio, lines=True, typ="Series", chunksize=1) as reader:
chunked = pd.concat(reader)
tm.assert_series_equal(chunked, unchunked)
def test_readjson_each_chunk(lines_json_df):
# Other tests check that the final result of read_json(chunksize=True)
# is correct. This checks the intermediate chunks.
with pd.read_json(StringIO(lines_json_df), lines=True, chunksize=2) as reader:
chunks = list(reader)
assert chunks[0].shape == (2, 2)
assert chunks[1].shape == (1, 2)
def test_readjson_chunks_from_file():
with tm.ensure_clean("test.json") as path:
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
df.to_json(path, lines=True, orient="records")
with pd.read_json(path, lines=True, chunksize=1) as reader:
chunked = pd.concat(reader)
unchunked = pd.read_json(path, lines=True)
tm.assert_frame_equal(unchunked, chunked)
@pytest.mark.parametrize("chunksize", [None, 1])
def test_readjson_chunks_closes(chunksize):
with tm.ensure_clean("test.json") as path:
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
df.to_json(path, lines=True, orient="records")
reader = JsonReader(
path,
orient=None,
typ="frame",
dtype=True,
convert_axes=True,
convert_dates=True,
keep_default_dates=True,
numpy=False,
precise_float=False,
date_unit=None,
encoding=None,
lines=True,
chunksize=chunksize,
compression=None,
nrows=None,
)
with reader:
reader.read()
assert (
reader.handles.handle.closed
), f"didn't close stream with chunksize = {chunksize}"
@pytest.mark.parametrize("chunksize", [0, -1, 2.2, "foo"])
def test_readjson_invalid_chunksize(lines_json_df, chunksize):
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
with pd.read_json(
StringIO(lines_json_df), lines=True, chunksize=chunksize
) as _:
pass
@pytest.mark.parametrize("chunksize", [None, 1, 2])
def test_readjson_chunks_multiple_empty_lines(chunksize):
j = """
{"A":1,"B":4}
{"A":2,"B":5}
{"A":3,"B":6}
"""
orig = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
test = pd.read_json(j, lines=True, chunksize=chunksize)
if chunksize is not None:
with test:
test = pd.concat(test)
tm.assert_frame_equal(orig, test, obj=f"chunksize: {chunksize}")
def test_readjson_unicode(monkeypatch):
with tm.ensure_clean("test.json") as path:
monkeypatch.setattr("_bootlocale.getpreferredencoding", lambda l: "cp949")
with open(path, "w", encoding="utf-8") as f:
f.write('{"£©µÀÆÖÞßéöÿ":["АБВГДабвгд가"]}')
result = read_json(path)
expected = DataFrame({"£©µÀÆÖÞßéöÿ": ["АБВГДабвгд가"]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1, 2])
def test_readjson_nrows(nrows):
# GH 33916
    # Test reading line-format JSON into a DataFrame with the nrows param
jsonl = """{"a": 1, "b": 2}
{"a": 3, "b": 4}
{"a": 5, "b": 6}
{"a": 7, "b": 8}"""
result = pd.read_json(jsonl, lines=True, nrows=nrows)
expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows,chunksize", [(2, 2), (4, 2)])
def test_readjson_nrows_chunks(nrows, chunksize):
# GH 33916
    # Test reading line-format JSON into a DataFrame with nrows and chunksize
jsonl = """{"a": 1, "b": 2}
{"a": 3, "b": 4}
{"a": 5, "b": 6}
{"a": 7, "b": 8}"""
with read_json(jsonl, lines=True, nrows=nrows, chunksize=chunksize) as reader:
chunked = pd.concat(reader)
expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
tm.assert_frame_equal(chunked, expected)
def test_readjson_nrows_requires_lines():
# GH 33916
    # Test ValueError raised if nrows is set without setting lines in read_json
jsonl = """{"a": 1, "b": 2}
{"a": 3, "b": 4}
{"a": 5, "b": 6}
{"a": 7, "b": 8}"""
msg = "nrows can only be passed if lines=True"
with pytest.raises(ValueError, match=msg):
pd.read_json(jsonl, lines=False, nrows=2)
def test_readjson_lines_chunks_fileurl(datapath):
# GH 27135
# Test reading line-format JSON from file url
df_list_expected = [
DataFrame([[1, 2]], columns=["a", "b"], index=[0]),
DataFrame([[3, 4]], columns=["a", "b"], index=[1]),
DataFrame([[5, 6]], columns=["a", "b"], index=[2]),
]
os_path = datapath("io", "json", "data", "line_delimited.json")
file_url = Path(os_path).as_uri()
with pd.read_json(file_url, lines=True, chunksize=1) as url_reader:
        for index, chunk in enumerate(url_reader):
            tm.assert_frame_equal(chunk, df_list_expected[index])
def test_chunksize_is_incremental():
# See https://github.com/pandas-dev/pandas/issues/34548
jsonl = (
"""{"a": 1, "b": 2}
{"a": 3, "b": 4}
{"a": 5, "b": 6}
{"a": 7, "b": 8}\n"""
* 1000
)
class MyReader:
def __init__(self, contents):
self.read_count = 0
self.stringio = StringIO(contents)
def read(self, *args):
self.read_count += 1
return self.stringio.read(*args)
def __iter__(self):
self.read_count += 1
return iter(self.stringio)
reader = MyReader(jsonl)
assert len(list(pd.read_json(reader, lines=True, chunksize=100))) > 1
assert reader.read_count > 10
| bsd-3-clause |
lheagy/casingResearch | casingSimulations/sources.py | 2 | 32188 | import numpy as np
import matplotlib.pyplot as plt
import os
import properties
import discretize
from discretize.utils import closestPoints
from SimPEG.utils import setKwargs
from SimPEG.electromagnetics import frequency_domain as fdem
from SimPEG.electromagnetics import time_domain as tdem
from .base import LoadableInstance, BaseCasing
from . import model
from .mesh import BaseMeshGenerator
from .info import __version__
class BaseCasingSrc(BaseCasing):
"""
The base class for sources. Inherit this to attach properties.
"""
filename = properties.String(
"filename to serialize properties to",
default="Source.json"
)
modelParameters = LoadableInstance(
"casing parameters",
model.Wholespace
)
meshGenerator = LoadableInstance(
"mesh generator instance",
BaseMeshGenerator
)
physics = properties.StringChoice(
"fdem or tdem simulation?",
choices=["fdem", "tdem"],
required=False
)
src_a = properties.Array(
"A electrode location"
)
src_b = properties.Array(
"B electrode location"
)
def __init__(self, **kwargs):
setKwargs(self, **kwargs)
if self.src_a is None:
self.src_a = self.modelParameters.src_a
if self.src_b is None:
self.src_b = self.modelParameters.src_b
assert self.src_a[1] == self.src_b[1], (
'non y-axis aligned sources have not been implemented'
)
@property
def mesh(self):
"""
discretize mesh
"""
return self.meshGenerator.mesh
# @property
# def src_a(self):
# """
# location of the a-electrode
# """
# if getattr(self, '_src_a', None) is None:
# return self.modelParameters.src_a
# return self._src_a
# @src_a.setter
# def src_a(self, value):
# self._src_a = value
# @property
# def src_b(self):
# """
# location of the b-electrode
# """
# if getattr(self, '_src_b', None) is None:
# return self.modelParameters.src_b
# return self._src_b
# @src_b.setter
# def src_b(self, value):
# self._src_b = value
@property
def casing_a(self):
"""
inner radius of the casing
"""
return self.modelParameters.casing_a
@property
def freqs(self):
"""
frequencies to consider
"""
return self.modelParameters.freqs
@property
def srcList(self):
"""
Source List
"""
if getattr(self, '_srcList', None) is None:
if self.physics.lower() == "fdem":
srcList = [
fdem.sources.RawVec_e([], f, self.s_e.astype("complex"))
for f in self.freqs
]
elif self.physics == "tdem":
srcList = [tdem.sources.RawVec_Grounded([], self.s_e)]
self._srcList = srcList
return self._srcList
class HorizontalElectricDipole(BaseCasingSrc):
"""
A horizontal electric dipole
"""
def __init__(self, **kwargs):
super(HorizontalElectricDipole, self).__init__(**kwargs)
assert self.src_a[2] == self.src_b[2], (
'z locations must be the same for a HED'
)
@property
def src_a_closest(self):
"""
closest face to where we want the return current electrode
"""
if getattr(self, '_src_a_closest', None) is None:
# find the z location of the closest face to the src
src_a_closest = (
self.mesh.gridFx[closestPoints(self.mesh, self.src_a, 'Fz'), :]
)
assert(len(src_a_closest) == 1), 'multiple source locs found'
self._src_a_closest = src_a_closest[0]
return self._src_a_closest
@property
def src_b_closest(self):
"""
closest face to where we want the return current electrode
"""
if getattr(self, '_src_b_closest', None) is None:
# find the z location of the closest face to the src
src_b_closest = (
self.mesh.gridFx[closestPoints(self.mesh, self.src_b, 'Fz'), :]
)
assert(len(src_b_closest) == 1), 'multiple source locs found'
self._src_b_closest = src_b_closest[0]
return self._src_b_closest
@property
def surface_wire(self):
"""
Horizontal part of the wire that runs along the surface
(one cell above) from the center of the well to the return electrode
"""
if getattr(self, '_surface_wire', None) is None:
mesh = self.mesh
src_a = self.src_a
src_b = self.src_b
# horizontally directed wire
surface_wirex = (
(
mesh.gridFx[:, 0] <= np.max(
[self.src_a[0], self.src_b[0]]
)
) &
(
mesh.gridFx[:, 0] >= np.min(
[self.src_a[0], self.src_b[0]]
)
)
)
surface_wirez = (
(mesh.gridFx[:, 2] > src_b[2] - self.mesh.hz.min()/2.) &
(mesh.gridFx[:, 2] < src_b[2] + self.mesh.hz.min()/2.)
)
self._surface_wire = surface_wirex & surface_wirez
if getattr(mesh, 'isSymmetric', False) is False:
surface_wirey = (
(mesh.gridFx[:, 1] > src_b[1] - mesh.hy.min()/2.) &
(mesh.gridFx[:, 1] < src_b[1] + mesh.hy.min()/2.)
)
self._surface_wire = (
self._surface_wire & surface_wirey
)
return self._surface_wire
@property
def surface_wire_direction(self):
"""
direction of the source wire
"""
# todo: extend to the case where the wire is not along the x-axis
return [-1. if self.src_a[0] < self.src_b[0] else 1.][0]
@property
def s_e(self):
"""
electric source term used to build the right hand side of the maxwell
system
"""
if getattr(self, '_s_e', None) is None:
# downhole source
s_x = np.zeros(self.mesh.vnF[0])
s_y = np.zeros(self.mesh.vnF[1])
s_z = np.zeros(self.mesh.vnF[2])
# horizontal part of wire along surface
s_x[self.surface_wire] = self.surface_wire_direction
# assemble the source (downhole grounded primary)
s_e = np.hstack([s_x, s_y, s_z])
self._s_e = s_e/self.mesh.area
# self._s_e = self.mesh.getFaceInnerProduct(invMat=True) * s_e
return self._s_e
def plot(self, ax=None):
"""
Plot the source.
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
mesh = self.mesh
ax.plot(
mesh.gridFx[self.surface_wire, 0],
mesh.gridFx[self.surface_wire, 2], 'r{}'.format(
['<' if self.surface_wire_direction == -1. else '>'][0]
)
)
@properties.validator
def _check_wire(self):
"""
Make sure that each segment of the wire is only going through a
single face
.. todo:: check that
"""
# check the surface wire only has one y and one z location
surface_wire = self.mesh.gridFx[self.surface_wire, :]
assert len(np.unique(surface_wire[:, 1])) == 1, (
'the surface wire has more than one y-location'
)
assert len(np.unique(surface_wire[:, 2])) == 1, (
'the surface wire has more than one z-location'
)
class VerticalElectricDipole(BaseCasingSrc):
"""
A vertical electric dipole. It is not coupled to the casing
:param CasingSimulations.Model.CasingProperties modelParameters: a casing properties instance
:param discretize.BaseMesh mesh: a discretize mesh
"""
def __init__(self, **kwargs):
super(VerticalElectricDipole, self).__init__(**kwargs)
assert all(self.src_a[:2] == self.src_b[:2]), (
'src_a and src_b must have the same horizontal location'
)
@property
def src_a_closest(self):
"""
closest face to where we want the return current electrode
"""
if getattr(self, '_src_a_closest', None) is None:
# find the z location of the closest face to the src
src_a_closest = (
self.mesh.gridFz[closestPoints(self.mesh, self.src_a, 'Fz'), :]
)
assert(len(src_a_closest) == 1), 'multiple source locs found'
self._src_a_closest = src_a_closest[0]
return self._src_a_closest
@property
def src_b_closest(self):
"""
closest face to where we want the return current electrode
"""
if getattr(self, '_src_b_closest', None) is None:
# find the z location of the closest face to the src
src_b_closest = (
self.mesh.gridFz[closestPoints(self.mesh, self.src_b, 'Fz'), :]
)
assert(len(src_b_closest) == 1), 'multiple source locs found'
self._src_b_closest = src_b_closest[0]
return self._src_b_closest
@property
def _wire_direction(self):
if self.src_a_closest[2] < self.src_b_closest[2]:
return -1
return 1
@property
def wire_in_borehole(self):
"""
        Indices of the vertically directed wire inside of the borehole. It goes
through the center of the well
"""
if getattr(self, '_wire_in_borehole', None) is None:
mesh = self.mesh
src_a = self.src_a
src_b = self.src_b
wire_in_boreholex = (
(mesh.gridFz[:, 0] < self.src_a_closest[0] + mesh.hx.min()/2.) &
(mesh.gridFz[:, 0] > self.src_a_closest[0] - mesh.hx.min()/2.)
)
wire_in_boreholez = (
(
mesh.gridFz[:, 2] >=
np.min([src_a[2], src_b[2]]) - 0.5*mesh.hz.min()
) &
(
mesh.gridFz[:, 2] <=
np.max([src_a[2], src_b[2]]) + 0.5*mesh.hz.min()
)
)
self._wire_in_borehole = wire_in_boreholex & wire_in_boreholez
if getattr(mesh, 'isSymmetric', False) is False:
wire_in_boreholey = (
(mesh.gridFz[:, 1] > src_a[1] - mesh.hy.min()/2.) &
(mesh.gridFz[:, 1] < src_a[1] + mesh.hy.min()/2.)
)
self._wire_in_borehole = (
self._wire_in_borehole & wire_in_boreholey
)
return self._wire_in_borehole
@property
def s_e(self):
"""
Source List
"""
if getattr(self, '_s_e', None) is None:
# downhole source
s_x = np.zeros(self.mesh.vnF[0])
s_y = np.zeros(self.mesh.vnF[1])
s_z = np.zeros(self.mesh.vnF[2])
s_z[self.wire_in_borehole] = self._wire_direction # part of wire through borehole
# assemble the source (downhole grounded primary)
s_e = np.hstack([s_x, s_y, s_z])
self._s_e = s_e/self.mesh.area
return self._s_e
def plot(self, ax=None):
"""
Plot the source.
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
mesh = self.mesh
ax.plot(
mesh.gridFz[self.wire_in_borehole, 0],
mesh.gridFz[self.wire_in_borehole, 2],
'rv' if self._wire_direction < 0 else 'r^'
)
@properties.validator
def _check_wire(self):
"""
Make sure that each segment of the wire is only going through a
single face
        .. todo:: check that the wirepath is in fact connected.
"""
# check that the wire inside the borehole has only one x, y, location
wire_in_borehole = self.mesh.gridFz[self.wire_in_borehole, :]
assert len(np.unique(wire_in_borehole[:, 0])) == 1, (
'the wire in borehole has more than one x-location'
)
assert len(np.unique(wire_in_borehole[:, 1])) == 1, (
'the wire in borehole has more than one y-location'
)
return True
class DownHoleTerminatingSrc(BaseCasingSrc):
"""
A source that terminates down-hole. It is not coupled to the casing
:param CasingSimulations.Model.CasingProperties modelParameters: a casing properties instance
:param discretize.BaseMesh mesh: a discretize mesh
"""
def __init__(self, **kwargs):
super(DownHoleTerminatingSrc, self).__init__(**kwargs)
@property
def src_a_closest(self):
"""
closest face to where we want the return current electrode
"""
if getattr(self, '_src_a_closest', None) is None:
# find the z location of the closest face to the src
src_a_closest = (
self.mesh.gridFz[closestPoints(self.mesh, self.src_a, 'Fz'), :]
)
assert(len(src_a_closest) == 1), 'multiple source locs found'
self._src_a_closest = src_a_closest[0]
return self._src_a_closest
@property
def src_b_closest(self):
"""
closest face to where we want the return current electrode
"""
if getattr(self, '_src_b_closest', None) is None:
# find the z location of the closest face to the src
src_b_closest = (
self.mesh.gridFz[closestPoints(self.mesh, self.src_b, 'Fz'), :]
)
assert(len(src_b_closest) == 1), 'multiple source locs found'
self._src_b_closest = src_b_closest[0]
return self._src_b_closest
@property
def wire_in_borehole(self):
"""
        Indices of the vertically directed wire inside of the borehole. It goes
through the center of the well
"""
if getattr(self, '_wire_in_borehole', None) is None:
mesh = self.mesh
src_a = self.src_a
src_b = self.src_b
wire_in_boreholex = (
(mesh.gridFz[:, 0] < self.src_a_closest[0] + mesh.hx.min()/2.) &
(mesh.gridFz[:, 0] > self.src_a_closest[0] - mesh.hx.min()/2.)
)
wire_in_boreholez = (
(mesh.gridFz[:, 2] >= src_a[2] - 0.5*mesh.hz.min()) &
(mesh.gridFz[:, 2] < src_b[2] + 1.5*mesh.hz.min())
)
self._wire_in_borehole = wire_in_boreholex & wire_in_boreholez
if getattr(mesh, 'isSymmetric', False) is False:
wire_in_boreholey = (
(mesh.gridFz[:, 1] > src_a[1] - mesh.hy.min()/2.) &
(mesh.gridFz[:, 1] < src_a[1] + mesh.hy.min()/2.)
)
self._wire_in_borehole = (
self._wire_in_borehole & wire_in_boreholey
)
return self._wire_in_borehole
@property
def surface_wire(self):
"""
Horizontal part of the wire that runs along the surface
(one cell above) from the center of the well to the return electrode
"""
if getattr(self, '_surface_wire', None) is None:
mesh = self.mesh
src_a = self.src_a
src_b = self.src_b
# horizontally directed wire
surface_wirex = (
(
mesh.gridFx[:, 0] <= np.max(
[self.src_a_closest[0], self.src_b_closest[0]]
)
) &
(
mesh.gridFx[:, 0] >= np.min(
[self.src_a_closest[0], self.src_b_closest[0]]
)
)
)
surface_wirez = (
(mesh.gridFx[:, 2] > mesh.hz.min()) &
(mesh.gridFx[:, 2] <= 2*mesh.hz.min())
)
self._surface_wire = surface_wirex & surface_wirez
if getattr(mesh, 'isSymmetric', False) is False:
surface_wirey = (
(mesh.gridFx[:, 1] > src_b[1] - mesh.hy.min()/2.) &
(mesh.gridFx[:, 1] < src_b[1] + mesh.hy.min()/2.)
)
self._surface_wire = self._surface_wire & surface_wirey
return self._surface_wire
@property
def surface_electrode(self):
"""
Return electrode on the surface
"""
if getattr(self, '_surface_electrode', None) is None:
mesh = self.mesh
src_a = self.src_a_closest
src_b = self.src_b_closest
# return electrode
surface_electrodex = (
(mesh.gridFz[:, 0] > self.src_b_closest[0] - mesh.hx.min()/2.) &
(mesh.gridFz[:, 0] < self.src_b_closest[0] + mesh.hx.min()/2.)
)
surface_electrodez = (
(mesh.gridFz[:, 2] >= src_b[2] - mesh.hz.min()) &
(mesh.gridFz[:, 2] < 2*mesh.hz.min())
)
self._surface_electrode = surface_electrodex & surface_electrodez
if getattr(mesh, 'isSymmetric', False) is False:
surface_electrodey = (
(mesh.gridFz[:, 1] > src_b[1] - mesh.hy.min()/2.) &
(mesh.gridFz[:, 1] < src_b[1] + mesh.hy.min()/2.)
)
self._surface_electrode = (
self._surface_electrode & surface_electrodey
)
return self._surface_electrode
@property
def surface_wire_direction(self):
# todo: extend to the case where the wire is not along the x-axis
return [-1. if self.src_a[0] < self.src_b[0] else 1.][0]
@property
def s_e(self):
"""
Source List
"""
if getattr(self, '_srcList', None) is None:
# downhole source
s_x = np.zeros(self.mesh.vnF[0])
s_y = np.zeros(self.mesh.vnF[1])
s_z = np.zeros(self.mesh.vnF[2])
s_z[self.wire_in_borehole] = -1. # part of wire through borehole
s_x[self.surface_wire] = self.surface_wire_direction # horizontal part of wire along surface
s_z[self.surface_electrode] = 1. # vertical part of return electrode
# assemble the source (downhole grounded primary)
s_e = np.hstack([s_x, s_y, s_z])
self._s_e = s_e/self.mesh.area
return self._s_e
def plot(self, ax=None):
"""
Plot the source.
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
mesh = self.mesh
ax.plot(
mesh.gridFz[self.wire_in_borehole, 0],
mesh.gridFz[self.wire_in_borehole, 2], 'rv'
)
ax.plot(
mesh.gridFz[self.surface_electrode, 0],
mesh.gridFz[self.surface_electrode, 2], 'r^'
)
ax.plot(
mesh.gridFx[self.surface_wire, 0],
mesh.gridFx[self.surface_wire, 2], 'r{}'.format(
['<' if self.surface_wire_direction == -1. else '>'][0]
)
)
@properties.validator
def _check_wire(self):
"""
Make sure that each segment of the wire is only going through a
single face
.. todo:: check that
"""
# check the surface electrode only has one x and one y location
surface_electrode = self.mesh.gridFz[self.surface_electrode, :]
assert len(np.unique(surface_electrode[:, 0])) == 1, (
'the surface electrode has more than one x-location'
)
assert len(np.unique(surface_electrode[:, 1])) == 1, (
'the surface electrode has more than one y-location'
)
# check the surface wire only has one y and one z location
surface_wire = self.mesh.gridFx[self.surface_wire, :]
assert len(np.unique(surface_wire[:, 1])) == 1, (
'the surface wire has more than one y-location'
)
assert len(np.unique(surface_wire[:, 2])) == 1, (
'the surface wire has more than one z-location'
)
# check that the wire inside the borehole has only one x, y, location
wire_in_borehole = self.mesh.gridFz[self.wire_in_borehole, :]
assert len(np.unique(wire_in_borehole[:, 0])) == 1, (
'the wire in borehole has more than one x-location'
)
assert len(np.unique(wire_in_borehole[:, 1])) == 1, (
'the wire in borehole has more than one y-location'
)
return True
# Source Grounded on Casing
class DownHoleCasingSrc(DownHoleTerminatingSrc):
"""
Source that is coupled to the casing down-hole and has a return electrode
at the surface.
:param CasingSimulations.Model.CasingProperties modelParameters: a casing properties instance
:param discretize.CylMesh mesh: a cylindrical mesh
"""
def __init__(self, **kwargs):
super(DownHoleCasingSrc, self).__init__(**kwargs)
@property
def downhole_electrode(self):
"""
Down-hole horizontal part of the wire, coupled to the casing
"""
if getattr(self, '_downhole_electrode', None) is None:
mesh = self.mesh
src_a = self.src_a_closest
src_b = self.src_b_closest
# couple to the casing downhole - top part
downhole_electrode_indx = mesh.gridFx[:, 0] <= self.casing_a # + mesh.hx.min()*2
# couple to the casing downhole - bottom part
downhole_electrode_indz2 = (
(mesh.gridFx[:, 2] <= src_a[2]) &
(mesh.gridFx[:, 2] > src_a[2] - mesh.hz.min())
)
self._downhole_electrode = (
downhole_electrode_indx & downhole_electrode_indz2
)
if getattr(mesh, 'isSymmetric', False) is False:
                downhole_electrode_indy = (
                    (mesh.gridFx[:, 1] > src_a[1] - mesh.hy.min()/2.) &
                    (mesh.gridFx[:, 1] < src_a[1] + mesh.hy.min()/2.)
                )
                self._downhole_electrode = (
                    self._downhole_electrode & downhole_electrode_indy
                )
)
return self._downhole_electrode
@property
def s_e(self):
"""
Source current density on faces
"""
if getattr(self, '_srcList', None) is None:
# downhole source
s_x = np.zeros(self.mesh.vnF[0])
s_y = np.zeros(self.mesh.vnF[1])
s_z = np.zeros(self.mesh.vnF[2])
s_z[self.wire_in_borehole] = -1. # part of wire through borehole
s_x[self.downhole_electrode] = 1. # downhole hz part of wire
s_x[self.surface_wire] = -1. # horizontal part of wire along surface
s_z[self.surface_electrode] = 1. # vertical part of return electrode
# assemble the source (downhole grounded primary)
s_e = np.hstack([s_x, s_y, s_z])
self._s_e = s_e/self.mesh.area
return self._s_e
def plot(self, ax=None):
"""
Plot the source.
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
mesh = self.mesh
super(DownHoleCasingSrc, self).plot(ax=ax)
ax.plot(
mesh.gridFx[self.downhole_electrode, 0],
mesh.gridFx[self.downhole_electrode, 2], 'r>'
)
return ax
@properties.validator
def _check_wire_more(self):
"""
Make sure that each segment of the wire is only going through a
single face
.. todo:: check that
"""
# check that the down-hole electrode has only one y, one z location
downhole_electrode = self.mesh.gridFx[self.downhole_electrode, :]
assert len(np.unique(downhole_electrode[:, 1])) == 1, (
'the downhole electrode has more than one y-location'
)
assert len(np.unique(downhole_electrode[:, 2])) == 1, (
'the downhole electrode has more than one z-location'
)
return True
class SurfaceGroundedSrc(DownHoleTerminatingSrc):
"""
Source that has two surface electrodes, neither connected to the casing.
"""
def __init__(self, **kwargs):
super(SurfaceGroundedSrc, self).__init__(**kwargs)
@property
def positive_electrode(self):
if getattr(self, '_positive_electrode', None) is None:
mesh = self.mesh
src_a = self.src_a_closest
src_b = self.src_b_closest
positive_electrodex = (mesh.gridFz[:, 0] == src_a[0])
positive_electrodez = (
(mesh.gridFz[:, 2] >= src_a[2]) &
(mesh.gridFz[:, 2] < 1.5*mesh.hz.min())
)
self._positive_electrode = (
positive_electrodex & positive_electrodez
)
if getattr(mesh, 'isSymmetric', False) is False:
positive_electrodey = (
(mesh.gridFz[:, 1] > src_a[1] - mesh.hy.min()) &
(mesh.gridFz[:, 1] < src_a[1] + mesh.hy.min())
)
self._positive_electrode = (
self._positive_electrode & positive_electrodey
)
return self._positive_electrode
# @property
# def surface_wire(self):
# """
# indices of the wire that runs along the surface
# """
# if getattr(self, '_surface_wire', None) is None:
# mesh = self.mesh
# src_a = self.src_a
# src_b = self.src_b
# # horizontally directed wire
# surface_wirex = (
# (mesh.gridFx[:, 0] <= self.src_b_closest[0]) &
# (mesh.gridFx[:, 0] >= self.src_a_closest[0])
# )
# surface_wirez = (
# (mesh.gridFx[:, 2] > src_b[2] + mesh.hz.min()) &
# (mesh.gridFx[:, 2] < src_b[2] + 2*mesh.hz.min())
# )
# self._surface_wire = surface_wirex & surface_wirez
# if getattr(mesh, 'isSymmetric', False) is False:
# surface_wirey = (
# (mesh.gridFx[:, 1] < src_b[1] + mesh.hy.min()/2.) &
# (mesh.gridFx[:, 1] > src_b[1] - mesh.hy.min()/2.)
# )
# self._surface_wire = self._surface_wire & surface_wirey
# return self._surface_wire
@property
def s_e(self):
"""
source list
"""
if getattr(self, '_srcList', None) is None:
# downhole source
s_x = np.zeros(self.mesh.vnF[0])
s_y = np.zeros(self.mesh.vnF[1])
s_z = np.zeros(self.mesh.vnF[2])
s_z[self.positive_electrode] = -1. # part of wire coupled to casing
s_x[self.surface_wire] = self.surface_wire_direction # horizontal part of wire along surface
s_z[self.surface_electrode] = 1. # vertical part of return electrode
            # assemble the source (downhole grounded primary)
s_e = np.hstack([s_x, s_y, s_z])
self._s_e = s_e/self.mesh.area
# self._s_e = self.mesh.getFaceInnerProduct(invMat=True) * s_e
return self._s_e
def plot(self, ax=None):
"""
plot the source on the mesh.
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
mesh = self.mesh
ax.plot(
mesh.gridFz[self.positive_electrode, 0],
mesh.gridFz[self.positive_electrode, 2], 'rv'
)
ax.plot(
mesh.gridFz[self.surface_electrode, 0],
mesh.gridFz[self.surface_electrode, 2], 'r^'
)
ax.plot(
mesh.gridFx[self.surface_wire, 0],
mesh.gridFx[self.surface_wire, 2], 'r{}'.format(
['<' if self.surface_wire_direction == -1. else '>'][0]
)
)
return ax
@properties.validator
def _check_wire(self):
"""
Make sure that each segment of the wire is only going through a
single face
"""
# check the surface electrode only has one x and one y location
surface_electrode = self.mesh.gridFz[self.surface_electrode, :]
assert len(np.unique(surface_electrode[:, 0])) == 1, (
'the surface electrode has more than one x-location'
)
assert len(np.unique(surface_electrode[:, 1])) == 1, (
'the surface electrode has more than one y-location'
)
# check the top casing electrode only has one x and one y location
positive_electrode = self.mesh.gridFz[self.positive_electrode, :]
assert len(np.unique(positive_electrode[:, 0])) == 1, (
'the tophole electrode has more than one x-location'
)
assert len(np.unique(positive_electrode[:, 1])) == 1, (
'the tophole electrode has more than one y-location'
)
# check the surface wire only has one y and one z location
surface_wire = self.mesh.gridFx[self.surface_wire, :]
assert len(np.unique(surface_wire[:, 1])) == 1, (
'the surface wire has more than one y-location'
)
assert len(np.unique(surface_wire[:, 2])) == 1, (
'the surface wire has more than one z-location'
)
return True
class TopCasingSrc(SurfaceGroundedSrc):
"""
Source that has one electrode coupled to the top of the casing, one return
electrode and a wire in between. This source is set up to live on faces.
:param discretize.CylMesh mesh: the cylindrical simulation mesh
:param CasingSimulations modelParameters: Casing parameters object
"""
def __init__(self, **kwargs):
super(TopCasingSrc, self).__init__(**kwargs)
self.src_a[0] = self.casing_a + self.mesh.hx.min()/2.
# @property
# def tophole_electrode(self):
# """
# Indices of the electrode that is grounded on the top of the casing
# """
# return self.positive_electrode
# if getattr(self, '_tophole_electrode', None) is None:
# mesh = self.mesh
# src_a = self.src_a
# src_b = self.src_b
# tophole_electrodex = (
# (mesh.gridFz[:, 0] <= self.casing_a + mesh.hx.min()) &
# (mesh.gridFz[:, 0] > self.casing_a)
# )
# tophole_electrodez = (
# (mesh.gridFz[:, 2] < src_a[2] + 1.5*mesh.hz.min()) &
# (mesh.gridFz[:, 2] >= src_a[2] - 0.5*mesh.hz.min())
# )
# self._tophole_electrode = tophole_electrodex & tophole_electrodez
# if getattr(mesh, 'isSymmetric', False) is False:
# tophole_electrodey = (
# (mesh.gridFz[:, 1] > src_a[1] - mesh.hy.min()) &
# (mesh.gridFz[:, 1] < src_a[1] + mesh.hy.min())
# )
# self._tophole_electrode = (
# self._tophole_electrode & tophole_electrodey
# )
# return self._tophole_electrode
class SourceList(BaseCasing):
"""
The source list
"""
filename = properties.String(
"filename to serialize the source list to",
default="SourceList.json"
)
sources = properties.List(
"list of casing sources",
properties.Instance(
"Instance of a BaseCasingSrc",
BaseCasingSrc
)
)
@property
def srcList(self):
if getattr(self, '_srcList', None) is None:
srcList = []
for src in self.sources:
srcList += src.srcList
self._srcList = srcList
return self._srcList
| mit |
rishikksh20/scikit-learn | examples/neural_networks/plot_mlp_training_curves.py | 58 | 3692 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
Note that those results can be highly dependent on the value of
``learning_rate_init``.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'adam', 'learning_rate_init': 0.01}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
| bsd-3-clause |
KDD-OpenSource/geox-young-academy | day-3/solutions/DA_exercises_IraBen.py | 1 | 3374 |
# coding: utf-8
# Linear Model
#
# \begin{equation}
# z_n=0.5z_{n-1}+\xi_{n-1}
# \end{equation}
# with
# $\xi_{n-1}\sim N(0,B)$ and $z_{0}\sim N(0,0.4)$
#
# Observations
# \begin{equation}
# y_n=z_n+\eta_n
# \end{equation}
# $\eta_{n-1}\sim N(0,R)$
#
# Kalman filter
#
# Forecast formulas:
# \begin{align}
# \hat{m}_{n+1}&=Am_n\\
# \hat{C}_{n+1}&=AC_nA^{\top}+B
# \end{align}
#
#
# Analysis formulas
# \begin{align}
# m_{n+1}&=\hat{m}_{n+1}-K_{n+1}(H\hat{m}_{n+1}-y_{n+1})\\
# C_{n+1}&=\hat{C}_{n+1}-K_{n+1}H\hat{C}_{n+1}
# \end{align}
#
# with Kalman gain
# \begin{equation}
# K_{n+1}=\hat{C}_{n+1}H^{\top}(R+H\hat{C}_{n+1}H^{\top})^{-1}
# \end{equation}
#
# Exercise: Please implement the Kalman filter for the example above
#
# In[70]:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import scipy.io as sio
import math
# In[92]:
n=100 # number of iterations
#initial
B=0.15 # Model Noise
R=0.02 # Observation Noise
A = 0.5 # Model Matrix
# creation of numpy arrays for variables
z= np.zeros(n)
m= np.zeros(n)
y= np.zeros(n)
C= np.zeros(n)
K= np.zeros(n)
# initial values for covariance and mean of the posterior
C[0]=0.4
m[0]=0
# initial values of model (truth) and observation data
z[0]=np.random.normal(loc=0.0, scale=0.4, size=None)
y[0]=z[0] + np.random.normal(loc=0.0, scale=R, size=None)
# In[93]:
# iteration through kalman filter
for i in range(0,n-1,1):
z[i+1]=A*z[i]+np.random.normal(loc=0.0, scale=B, size=None) # evolution of model
y[i+1]=z[i+1] + np.random.normal(loc=0.0, scale=R, size=None) # evolution of observ.
#forecast formulas
m[i+1]=A*m[i]
C[i+1]=A*A*C[i]+B
K[i+1]=C[i+1]/(R+C[i+1]) # kalman gain
# analysis formulas
m[i+1]=m[i+1]-K[i+1]*(m[i+1]-y[i+1])
C[i+1]=C[i+1]-K[i+1]*C[i+1]
# In[94]:
print(m[-1])
print(C[-1])
print(np.mean(z))
print(np.cov(z))
# In[100]:
# plot initial cond.
mu = 0
variance = 0.4
sigma = math.sqrt(variance)
x = np.linspace(mu-3*sigma,mu+3*sigma, 100)
plt.plot(x,mlab.normpdf(x, mu, sigma),"b", label="initial")
# plot posterior
mu_1 = m[-1]
variance_1 = C[-1]
sigma_1 = math.sqrt(variance_1)
x_1 = np.linspace(mu_1-3*sigma_1,mu_1+3*sigma_1, 100)
plt.plot(x_1,mlab.normpdf(x_1, mu_1, sigma_1),"r--", label="posterior")
plt.title("B= "+ str(B)+ ", R= "+ str(R) + "\n"+" number of steps= " + str(n))
plt.legend()
plt.grid()
plt.show()
# In[101]:
plt.plot(range(0,n), m, "r--", label= "posterior est.")
plt.plot(range(0,n), z, "b--", label= "model (truth)")
plt.plot(range(0,n), y, "g--", label="Observations")
plt.grid()
plt.legend()
plt.show()
#
#
# Lorenz equations
#
# \begin{align}
# \dot{x}&=\sigma(y-x)\\
# \dot{y}&=x(\rho-z)-y\\
# \dot{z}&=xy-\beta z
# \end{align}
#
# Ensemble Kalman Filter
# \begin{equation}
# z^i_{n+1}=\hat{z}^i_{n+1}-K_{n+1}(H\hat{z}^i_{n+1}-\tilde{y}^i_{n+1})
# \end{equation}
#
# \begin{align}
# m_{n}&\approx\frac{1}{M}\sum^M_{i=1}z^i_{n}\\
# C_{n}&\approx\frac{1}{M}\sum^M_{i=1}(z^i_{n}-m_{n})(z^i_{n}-m_{n})^{\top}
# \end{align}
#
# Exercise: Please implement the Ensemble Kalman filter for the Lorenz equation
#
#
# In[102]:
mat_contents = sio.loadmat(r"C:\Users\Sonya\Downloads\Data1.mat")
# In[ ]:
mat_contents
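# --- Added illustration: a minimal Ensemble Kalman Filter sketch ------------
# The block below is a hedged sketch, not part of the original exercise
# solution. It assumes the standard Lorenz-63 parameters (sigma=10, rho=28,
# beta=8/3), a forward-Euler time stepper, observation of all three state
# components (H = identity) and a synthetic twin experiment generated here;
# it does not use the Data1.mat file loaded above. Step size, noise levels
# and ensemble size are illustrative choices.

def lorenz_rhs(state, sigma=10.0, rho=28.0, beta=8.0/3):
    x, y, z_ = state
    return np.array([sigma*(y - x), x*(rho - z_) - y, x*y - beta*z_])

def lorenz_step(state, dt=0.01):
    # one forward-Euler step; crude, but enough for an illustration
    return state + dt*lorenz_rhs(state)

def enkf(y_obs, M=50, dt=0.01, obs_noise=1.0):
    # y_obs: (n_steps, 3) array of noisy observations of the full state
    n_steps = y_obs.shape[0]
    R_obs = obs_noise**2*np.eye(3)
    # initial ensemble scattered around an arbitrary starting point
    ens = np.random.normal(loc=0.0, scale=2.0, size=(M, 3)) + np.array([1.0, 1.0, 20.0])
    means = np.zeros((n_steps, 3))
    for k in range(n_steps):
        # forecast: push every ensemble member through the model
        ens = np.array([lorenz_step(e, dt) for e in ens])
        m_hat = ens.mean(axis=0)
        anom = ens - m_hat                                 # ensemble anomalies
        C_hat = anom.T.dot(anom)/(M - 1)                   # sample covariance
        K_gain = C_hat.dot(np.linalg.inv(C_hat + R_obs))   # Kalman gain (H = I)
        # analysis: update each member with a perturbed observation
        y_pert = y_obs[k] + np.random.multivariate_normal(np.zeros(3), R_obs, size=M)
        ens = ens + (y_pert - ens).dot(K_gain.T)
        means[k] = ens.mean(axis=0)
    return means

# tiny synthetic twin experiment: truth, noisy observations, filtered mean
truth = np.zeros((200, 3))
truth[0] = np.array([1.0, 1.0, 20.0])
for k in range(1, 200):
    truth[k] = lorenz_step(truth[k - 1])
obs = truth + np.random.normal(scale=1.0, size=truth.shape)
enkf_mean = enkf(obs)
# -----------------------------------------------------------------------------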
# Particle filter
#
#
# Exercise: Please implement the Particle filter with resampling for the Lorenz equation
#
#
#
# In[ ]:
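# --- Added illustration: a minimal bootstrap particle filter sketch ---------
# Again a hedged sketch rather than the original solution. It reuses the
# lorenz_step function and the synthetic obs array defined in the EnKF sketch
# above, uses the model dynamics plus Gaussian jitter as the proposal,
# Gaussian observation likelihoods, and multinomial resampling at every step.
# Particle count and noise levels are illustrative assumptions.

def particle_filter(y_obs, n_particles=500, dt=0.01, obs_noise=1.0, jitter=0.5):
    n_steps = y_obs.shape[0]
    particles = np.random.normal(loc=0.0, scale=2.0, size=(n_particles, 3)) + np.array([1.0, 1.0, 20.0])
    estimates = np.zeros((n_steps, 3))
    for k in range(n_steps):
        # propagate particles through the model and add process noise
        particles = np.array([lorenz_step(p, dt) for p in particles])
        particles = particles + np.random.normal(scale=jitter, size=particles.shape)
        # weights from the Gaussian observation likelihood (log-space for stability)
        sq_dist = ((particles - y_obs[k])**2).sum(axis=1)
        log_w = -0.5*sq_dist/obs_noise**2
        log_w -= log_w.max()
        w = np.exp(log_w)
        w /= w.sum()
        estimates[k] = (w[:, None]*particles).sum(axis=0)
        # multinomial resampling
        idx = np.random.choice(n_particles, size=n_particles, p=w)
        particles = particles[idx]
    return estimates

pf_mean = particle_filter(obs)
# -----------------------------------------------------------------------------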
| mit |
awalls-cx18/gnuradio | gr-filter/examples/reconstruction.py | 7 | 5011 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, digital
from gnuradio import filter
from gnuradio import blocks
import sys
import numpy
try:
from gnuradio import channels
except ImportError:
print("Error: Program requires gr-channels.")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
print("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).")
sys.exit(1)
fftlen = 8192
def main():
N = 10000
fs = 2000.0
Ts = 1.0 / fs
t = numpy.arange(0, N*Ts, Ts)
# When playing with the number of channels, be careful about the filter
# specs and the channel map of the synthesizer set below.
nchans = 10
# Build the filter(s)
bw = 1000
tb = 400
proto_taps = filter.firdes.low_pass_2(1, nchans*fs,
bw, tb, 80,
filter.firdes.WIN_BLACKMAN_hARRIS)
print("Filter length: ", len(proto_taps))
# Create a modulated signal
npwr = 0.01
data = numpy.random.randint(0, 256, N)
rrc_taps = filter.firdes.root_raised_cosine(1, 2, 1, 0.35, 41)
src = blocks.vector_source_b(data.astype(numpy.uint8).tolist(), False)
mod = digital.bpsk_mod(samples_per_symbol=2)
chan = channels.channel_model(npwr)
rrc = filter.fft_filter_ccc(1, rrc_taps)
# Split it up into pieces
channelizer = filter.pfb.channelizer_ccf(nchans, proto_taps, 2)
# Put the pieces back together again
syn_taps = [nchans*t for t in proto_taps]
synthesizer = filter.pfb_synthesizer_ccf(nchans, syn_taps, True)
src_snk = blocks.vector_sink_c()
snk = blocks.vector_sink_c()
# Remap the location of the channels
    # Can be done in synth or channelizer (watch out for rotations in
# the channelizer)
synthesizer.set_channel_map([ 0, 1, 2, 3, 4,
15, 16, 17, 18, 19])
tb = gr.top_block()
tb.connect(src, mod, chan, rrc, channelizer)
tb.connect(rrc, src_snk)
vsnk = []
for i in range(nchans):
tb.connect((channelizer,i), (synthesizer, i))
vsnk.append(blocks.vector_sink_c())
tb.connect((channelizer,i), vsnk[i])
tb.connect(synthesizer, snk)
tb.run()
sin = numpy.array(src_snk.data()[1000:])
sout = numpy.array(snk.data()[1000:])
# Plot original signal
fs_in = nchans*fs
f1 = pyplot.figure(1, figsize=(16,12), facecolor='w')
s11 = f1.add_subplot(2,2,1)
s11.psd(sin, NFFT=fftlen, Fs=fs_in)
s11.set_title("PSD of Original Signal")
s11.set_ylim([-200, -20])
s12 = f1.add_subplot(2,2,2)
s12.plot(sin.real[1000:1500], "o-b")
s12.plot(sin.imag[1000:1500], "o-r")
s12.set_title("Original Signal in Time")
start = 1
skip = 2
s13 = f1.add_subplot(2,2,3)
s13.plot(sin.real[start::skip], sin.imag[start::skip], "o")
s13.set_title("Constellation")
s13.set_xlim([-2, 2])
s13.set_ylim([-2, 2])
# Plot channels
nrows = int(numpy.sqrt(nchans))
ncols = int(numpy.ceil(float(nchans) / float(nrows)))
f2 = pyplot.figure(2, figsize=(16,12), facecolor='w')
for n in range(nchans):
s = f2.add_subplot(nrows, ncols, n+1)
s.psd(vsnk[n].data(), NFFT=fftlen, Fs=fs_in)
s.set_title("Channel {0}".format(n))
s.set_ylim([-200, -20])
# Plot reconstructed signal
fs_out = 2*nchans*fs
f3 = pyplot.figure(3, figsize=(16,12), facecolor='w')
s31 = f3.add_subplot(2,2,1)
s31.psd(sout, NFFT=fftlen, Fs=fs_out)
s31.set_title("PSD of Reconstructed Signal")
s31.set_ylim([-200, -20])
s32 = f3.add_subplot(2,2,2)
s32.plot(sout.real[1000:1500], "o-b")
s32.plot(sout.imag[1000:1500], "o-r")
s32.set_title("Reconstructed Signal in Time")
start = 0
skip = 4
s33 = f3.add_subplot(2,2,3)
s33.plot(sout.real[start::skip], sout.imag[start::skip], "o")
s33.set_title("Constellation")
s33.set_xlim([-2, 2])
s33.set_ylim([-2, 2])
pyplot.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
potash/scikit-learn | benchmarks/bench_plot_ward.py | 117 | 1283 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
plt.figure("scikit-learn Ward's method benchmark results")
plt.imshow(np.log(ratio), aspect='auto', origin="lower")
plt.colorbar()
plt.contour(ratio, levels=[1, ], colors='k')
plt.yticks(range(len(n_features)), n_features.astype(np.int))
plt.ylabel('N features')
plt.xticks(range(len(n_samples)), n_samples.astype(np.int))
plt.xlabel('N samples')
plt.title("Scikit's time, in units of scipy time (log)")
plt.show()
| bsd-3-clause |
gsmcmullin/libswiftnav | python/docs/extensions/ipython_directive.py | 31 | 27191 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
    default is 'Out[%d]:'. This expects that the line numbers are used
in the prompt.
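
For example, a project's ``conf.py`` might contain something along these
lines (an illustrative sketch only: the extension name depends on where you
place this file on the Sphinx path, and the values shown are typical choices
rather than requirements)::

    extensions = ['ipython_directive']

    ipython_savefig_dir = '_static'
    ipython_promptin = 'In [%d]:'
    ipython_promptout = 'Out[%d]:'
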
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import os
import re
import sys
import tempfile
import ast
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
matplotlib.use('Agg')
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
    part is a string of ipython text, comprising at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
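
    For example (an illustrative sketch, not a transcript from a real run),
    the two-line text::

        In [1]: x = 1 + 1
        Out[1]: 2

    would be parsed into roughly::

        [(INPUT, (None, 'x = 1 + 1', '')), (OUTPUT, '2')]
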
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
        content is a list of strings. It is the unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
seen_docs = [i for i in os.listdir(tempfile.tempdir)
if i.startswith('seen_doc')]
if seen_docs:
fname = os.path.join(tempfile.tempdir, seen_docs[0])
docs = open(fname).read().split('\n')
if not self.state.document.current_source in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if not self.state.document.current_source in docs:
fd, fname = tempfile.mkstemp(prefix="seen_doc", text=True)
fout = open(fname, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
                print('\n'.join(lines))
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
self.teardown()
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
    app.add_config_value('ipython_rgxin',
                         re.compile(r'In \[(\d+)\]:\s?(.*)\s*'), True)
    app.add_config_value('ipython_rgxout',
                         re.compile(r'Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
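# Usage sketch (illustrative only, never called by the directive): the values a
# hypothetical Sphinx conf.py would set to use this extension. The module name
# 'ipython_directive' is an assumption and depends on how the extension is made
# importable in a given project.
def example_sphinx_conf():
    """Return example conf.py settings for this extension (illustration only)."""
    return {
        'extensions': ['ipython_directive'],  # assumed importable module name
        'ipython_savefig_dir': '_static',     # where @savefig images are written
        'ipython_promptin': 'In [%d]:',
        'ipython_promptout': 'Out[%d]:',
    }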
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
    print('All OK? Check figures in _static/')
| lgpl-3.0 |
juditacs/hunspeech | emLid/attic/shifted_delta_cepstra.py | 1 | 9031 | import logging
from math import log
import os
import pickle
import numpy as np
import scipy.io.wavfile as wav
from sklearn.cluster import KMeans, AffinityPropagation, MeanShift, SpectralClustering, AgglomerativeClustering, DBSCAN, Birch, estimate_bandwidth
from sklearn.mixture import GMM
from sklearn.manifold import TSNE
from features import mfcc
def get_logger():
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(asctime)s %(module)s (%(lineno)s) %(levelname)s %(message)s"))
logger.addHandler(handler)
return logger
class ShiftedDeltaClusterer():
def __init__(self,n_clusters=6, n_jobs=-1):
"""
Cluster speech frames by language, based on shifted delta cepstral
features. Many clustering algorithms by sklearn are tried.
AffinityPropagation
not used, because its time complexity is quadratic in the number
of samples.
MeanShift
idea
the mean shift vector is computed for each centroid and points
towards a region of the maximum increase in the density
complexity
O(T*n*log(n)) in lower dimensions, with n the number of
samples and T the number of points. In higher dimensions the
complexity will tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds,
for example by using a higher value of min_bin_freq in the
get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable
than the mean shift algorithm and will be the bottleneck if it
is used.
AgglomerativeClustering:
"linkage" determines the metric used for the merge strategy
scalability
scalable, when when it is used jointly with a connectivity
matrix, but is computationally expensive when no connectivity
constraints are added between samples: it considers at each
step all the possible merges.
Birch
n_clusters
the final clustering step treats the subclusters from the
leaves as new samples. By default, this final clustering step
is not performed and the subclusters are returned as they are
TSNE
perplexity http://lvdmaaten.github.io/tsne/
"""
# TODO TSNE
homes = '/home' if True else '/mnt/store'
self.wav_dir = os.path.join(homes, 'hlt/Speech/Jewels/wav/')
self.project_dir = os.path.join(homes,
'makrai/data/speech/jewel')
n_comp = 1 # GMM
self.algos = [
("KMeans++", KMeans(n_clusters=n_clusters, init='k-means++',
n_jobs=n_jobs)),
("KMeans-rand", KMeans(n_clusters=n_clusters, init='random',
n_jobs=n_jobs)),
# TODO try preprocessing by PCA before KMeans
#("MeanShift", MeanShift(n_jobs=n_jobs, bandwidth=56.3255)),
# bandwidth estimated from points :16384 TODO
("SpectralClustering", SpectralClustering(n_clusters=n_clusters)),
# TODO connectivity matrix
# ("AgglomerativeClustering-ward", AgglomerativeClustering(n_clusters=n_clusters, linkage='ward')),
# ("AgglomerativeClustering-compl", AgglomerativeClustering(n_clusters=n_clusters, linkage='complete')),
# ("AgglomerativeClustering-avg", AgglomerativeClustering(n_clusters=n_clusters, linkage='average')),
("DBSCAN", DBSCAN()),
# algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'},
# optional, the algorithm to be used to find nearest neighbors
("GMM-spherical", GMM(n_components=n_comp,
covariance_type='spherical')),
("GMM-tied", GMM(n_components=n_comp, covariance_type='tied')),
("GMM-diag", GMM(n_components=n_comp, covariance_type='diag')),
("GMM-full", GMM(n_components=n_comp, covariance_type='full')),
("Birch", Birch()), # MemoryError
("TSNE", TSNE())
]
def get_sdc_all_tracks(self):
data_fn = os.path.join(self.project_dir, 'sdc_all_jewel.npy')
if os.path.isfile(data_fn):
            self.sdc_all_speech = np.load(open(data_fn, mode='rb'))
else:
logger.info(
'Computing shifted delta cepstra for all speech in {}'.format(
self.wav_dir))
self.sdc_all_speech = np.concatenate([self.shifted_delta_cepstra(
os.path.join(self.wav_dir, wav_fn))
for wav_fn in os.listdir(self.wav_dir)])
            np.save(open(data_fn, mode='wb'), self.sdc_all_speech)
def shifted_delta_cepstra(self, wav_fn, delta=1, shift=3, k_conc=3):
"""
:param
delta: represents the time advance and delay for the sdc
k_conc: is the number of blocks whose delta coefficients are concd
shift: is the time shift between consecutive blocks
Shifted delta cepstra are feature vectors created by concatenating
delta cepstra computed across multiple speech frames.
See the paper
PA Torres-Carrasquillo et al (2002)
Approaches to language identification using
Gaussian mixture models and Shifted delta cepstral features.
"""
(rate,sig) = wav.read(wav_fn)
mfcc_feats = mfcc(sig,rate)
# len(mfcc) == 39 == 3 * (12 cepstral + 1 energy)
# TODO include original cepstra as well?
delta_feats = mfcc_feats[delta:] - mfcc_feats[:-delta]
output_duration = delta_feats.shape[0] - shift*k_conc
shifted = np.zeros((output_duration,
(k_conc + 1) * delta_feats.shape[1]))
mfcc_dim = mfcc_feats.shape[1]
shifted[:,0:mfcc_dim] = mfcc_feats[:output_duration]
for i in xrange(output_duration):
shifted[i,mfcc_dim:] = delta_feats[i:i+k_conc*shift:shift,
:].reshape((1,-1))
logger.debug('{} --> {}'.format(mfcc_feats.shape, shifted.shape))
return shifted
def assign(self):
cluster_dir = '{}/cluster'.format(self.project_dir)
if not os.path.exists(cluster_dir):
os.mkdir(cluster_dir)
for algo_name, algo in self.algos:
try:
algo_dir = os.path.join(cluster_dir, algo_name)
classer = self.get_classer(algo_name, algo, algo_dir)
for wav_fn in os.listdir(self.wav_dir):
track_to_clust_fn = '{}.npy'.format(
os.path.join(algo_dir, os.path.splitext(wav_fn)[0]))
if os.path.isfile(track_to_clust_fn):
continue
logger.info('Assigning {} by {}'.format(wav_fn,algo_name))
#if hasattr(classer, 'predict'):
assignment = classer.predict(self.shifted_delta_cepstra(
os.path.join(self.wav_dir, wav_fn)))
np.savetxt(track_to_clust_fn, assignment, fmt='%i')
except Exception as e:
logger.exception(e)
def get_classer(self, algo_name, classer, algo_dir):
if not os.path.exists(algo_dir):
os.mkdir(algo_dir)
classer_fn = '{}_classer.npy'.format(os.path.join(algo_dir, algo_name))
trafoed_fn = '{}_trafoed.npy'.format(os.path.join(algo_dir, algo_name))
if os.path.isfile(classer_fn):
return pickle.load(open(classer_fn, mode='rb'))
else:
if algo_name == 'DBSCAN':
self.loop_estimate_bandwidth()
logger.info('clustering all speech with {}'.format(algo_name))
if hasattr(classer, 'fit') and hasattr(classer, 'predict'):
classer.fit(self.sdc_all_speech)
elif hasattr(classer, 'fit_transform'): # TSNE
all_speech_trafoed = classer.fit_transform(self.sdc_all_speech)
np.save(open(trafoed_fn, mode='wb'), all_speech_trafoed)
else: # DBSCAN
classer.fit_predict(self.sdc_all_speech)
logger.info(classer.get_params())
logger.info('dumping classifier')
pickle.dump(classer, open(classer_fn, mode='wb'))
return classer
    def loop_estimate_bandwidth(self):
len_ = 4
while len_ < self.sdc_all_speech.shape[0]:
logging.info((len_,
estimate_bandwidth(self.sdc_all_speech[:len_])))
len_ *= 2
def main(self):
self.get_sdc_all_tracks()
self.assign()
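# Standalone sketch (not used by the class above): with synthetic "cepstra" this
# mirrors the shape arithmetic of ShiftedDeltaClusterer.shifted_delta_cepstra():
# for n frames, delta d, shift P and k concatenated blocks, the output has
# n - d - P*k rows and (k + 1) * n_coeff columns. All numbers are assumptions
# chosen only for illustration.
def _sdc_shape_sketch(n_frames=100, n_coeff=13, delta=1, shift=3, k_conc=3):
    feats = np.random.rand(n_frames, n_coeff)      # stand-in for MFCC frames
    delta_feats = feats[delta:] - feats[:-delta]   # frame-to-frame deltas
    out_len = delta_feats.shape[0] - shift * k_conc
    sdc = np.zeros((out_len, (k_conc + 1) * n_coeff))
    sdc[:, :n_coeff] = feats[:out_len]             # keep the original cepstra
    for i in range(out_len):                       # concatenate k shifted delta blocks
        sdc[i, n_coeff:] = delta_feats[i:i + k_conc * shift:shift, :].reshape(-1)
    return sdc.shape                               # (out_len, (k_conc + 1) * n_coeff)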
if __name__ == "__main__":
logger = get_logger()
ShiftedDeltaClusterer().main()
| mit |
rishikksh20/scikit-learn | examples/svm/plot_svm_kernels.py | 96 | 2019 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10, edgecolors='k')
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired,
edgecolors='k')
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
cligs/tmw | tmw.py | 1 | 81506 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename: tmw.py
# Authors: christofs, daschloer
# Version 0.3.0 (2016-03-20)
##################################################################
### Topic Modeling Workflow (tmw) ###
##################################################################
# TODO: Use os.path.join everywhere for cross-platform compatibility.
import re
import os
import glob
import pandas as pd
from os import listdir
from os.path import join
from nltk.tokenize import word_tokenize
import glob
import subprocess
##################################################################
### PREPROCESSING ###
##################################################################
#################################
# tei5reader #
#################################
def tei5reader_fulldocs(inpath, outfolder):
"""Script for reading selected text from TEI P5 files."""
print("\nLaunched tei5reader_fulldocs.")
from lxml import etree
#print("Using LXML version: ", etree.LXML_VERSION)
if not os.path.exists(outfolder):
os.makedirs(outfolder)
for file in glob.glob(inpath):
with open(file, "r"):
filename = os.path.basename(file)[:-4]
#print(filename[:5]) # = idno
### The following options may help with parsing errors.
#parser = etree.XMLParser(collect_ids=False, recover=True)
parser = etree.XMLParser(recover=True)
xml = etree.parse(file, parser)
### The TEI P5 files do have a default namespace.
namespaces = {'tei':'http://www.tei-c.org/ns/1.0'}
### Removes tags but conserves their text content.
etree.strip_tags(xml, "{http://www.tei-c.org/ns/1.0}seg")
### Removes elements and their text content.
#etree.strip_elements(xml, "speaker")
etree.strip_elements(xml, "{http://www.tei-c.org/ns/1.0}note")
#etree.strip_elements(xml, "stage")
etree.strip_elements(xml, "{http://www.tei-c.org/ns/1.0}head")
### XPath defining which text to select
xp_bodytext = "//tei:body//text()"
#xp_alltext = "//text()"
### Applying one of the above XPaths
text = xml.xpath(xp_bodytext, namespaces=namespaces)
text = "\n".join(text)
### Some cleaning up
text = re.sub("[ ]{1,20}", " ", text)
text = re.sub("\t\n", "\n", text)
text = re.sub("\n{1,10}", "\n", text)
text = re.sub("\n \n", "\n", text)
text = re.sub("\n.\n", "\n", text)
text = re.sub("[ ]{1,20}", " ", text)
outtext = str(text)
outfile = outfolder + filename + ".txt"
with open(outfile,"w") as output:
output.write(outtext)
print("Done.")
# Utility function for writing segments
def writesegment(segment, outfolder, filename, counter, mode="w"):
from os.path import join
segname = join(outfolder, filename + "§{:04d}".format(counter) + ".txt")
with open(segname, mode) as output:
output.write(' '.join(segment))
output.close()
#################################
# segmenter #
#################################
# Utility function for writing into files
def write(segment, file, mode = "w"):
with open(file, mode) as output:
output.write(' '.join(segment))
output.close()
# global segment counter
counter = 0
# global current segment size
currentsegmentsize = 0
# Utility function for writing segments
def writesegment(segment, outfolder, filename, target, tolerancefactor, preserveparagraphs):
from os.path import join
global currentsegmentsize
global counter
# ignore empty segments
if segment == ["\n"] or len(segment) < 1:
return
# workaround for easy inter line-spacing in case of paragraph removal for lines combined into one segment
if not preserveparagraphs and segment[-1] == "\n":
segment = segment[0:len(segment) - 1]
segment[-1] += " "
segname = join(outfolder, filename + "§{:04d}".format(counter) + ".txt")
relname = filename + "§{:04d}".format(counter) + ".txt"
# case: last segment is too small => fill with (slice of) new segment
if currentsegmentsize * tolerancefactor < target: # min size limit not reached => split
#split segment
wordsliceindex = target - currentsegmentsize
# if it's too big: slice!
if currentsegmentsize + len(segment) > target * tolerancefactor:
#print(relname + "\t Last segment size: " + str(currentsegmentsize) + "\t appending " + str(wordsliceindex) + "\t for a total of " + str((currentsegmentsize + wordsliceindex)))
write(segment[0:wordsliceindex], segname, "a")
currentsegmentsize += wordsliceindex
segment = segment[wordsliceindex:len(segment)]
# segment is filled. continue with next one
counter += 1
currentsegmentsize = 0
segname = join(outfolder, filename + "§{:04d}".format(counter) + ".txt")
relname = filename + "§{:04d}".format(counter) + ".txt"
if os.path.isfile(segname):
os.remove(segname)
# else just add text to current segment
else:
#print(relname + "\t Last segment size: " + str(currentsegmentsize) + "\t appending " + str(len(segment)) + "\t for a total of " + str((currentsegmentsize + len(segment))))
# segment fits so append
write(segment, segname, "a")
currentsegmentsize += len(segment) - segment.count("\n") # take possible segment end into account!
# done
return
# case: new segment is too big
# if segment > target: slice segment
while len(segment) > target * tolerancefactor:
#print(relname + "\t Last segment size: " + str(currentsegmentsize) + "\t appending " + str(target) + "\t for a total of " + str((currentsegmentsize + target)))
write(segment[0:target], segname)
segment = segment[target:len(segment)]
# segment is filled. continue with next one
counter += 1
currentsegmentsize = 0
segname = join(outfolder, filename + "§{:04d}".format(counter) + ".txt")
relname = filename + "§{:04d}".format(counter) + ".txt"
if os.path.isfile(segname):
os.remove(segname)
#print(relname + "\t New segment with size \t0")
# now size of segment is < target
if (len(segment) == 0):
#segment was perfectly sliced so we are done
return
# there's some part of segment left, write this into file
# if the remaining part is exceeding current segment's capacity start new segment
if currentsegmentsize + len(segment) > target * tolerancefactor:
# segment is filled. continue with next one
counter += 1
currentsegmentsize = 0
segname = join(outfolder, filename + "§{:04d}".format(counter) + ".txt")
relname = filename + "§{:04d}".format(counter) + ".txt"
if os.path.isfile(segname):
os.remove(segname)
#print(relname + "\t New segment with size \t0")
#print(relname + "\t Last segment size: " + str(currentsegmentsize) + "\t appending " + str(len(segment)) + "\t for a total of " + str((currentsegmentsize + len(segment))))
currentsegmentsize += len(segment) - segment.count("\n") # take possible segment end into account!
write(segment, segname, "a")
def segmenter(inpath, outfolder, target, sizetolerancefactor, preserveparagraphs = False):
"""Script for turning plain text files into equal-sized segments, with limited respect for paragraph boundaries."""
print("\nLaunched segmenter.")
from os.path import join
from nltk.tokenize import word_tokenize
if not os.path.exists(outfolder):
os.makedirs(outfolder)
global counter
global currentsegmentsize
# work on files in inpath
for relfile in glob.glob(inpath):
# get absolut filename
file = join(inpath, relfile)
with open(file, "r") as infile:
filename = os.path.basename(file)[:-4]
counter = 0
currentsegmentsize = 0
            segname = join(outfolder, filename + "§{:04d}".format(counter) + ".txt")
            relname = filename + "§{:04d}".format(counter) + ".txt"
if os.path.isfile(segname):
os.remove(segname)
# segment contains words assigned to the current segment
segment = []
# go through paragraphs one by one
for line in infile:
text = line
# (optional) remove punctuation, special characters and space-chains
#text = re.sub("[,;\.:!?¿\(\)—-]", " ", text)
text = re.sub("[\t\r\n\v\f]", " ", text)
text = re.sub("[ ]{1,9}", " ", text)
# tokenize text
words = word_tokenize(text)
words.append("\n")
writesegment(words, outfolder, filename, target, sizetolerancefactor, preserveparagraphs)
print("Done.")
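# Hypothetical driver for the segmenter (paths and sizes are assumptions): split
# every plain-text file into roughly 1000-token segments, allowing 10% overshoot
# before a new segment is started, and ignoring paragraph breaks.
def _example_segmenter_call():
    segmenter("corpus/txt/*.txt", "corpus/segs1000/", 1000, 1.1,
              preserveparagraphs=False)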
#################################
# Binning #
#################################
def segments_to_bins(inpath, outfolder, binsnb):
"""Script for sorting text segments into bins."""
print("\nLaunched segments_to_bins.")
import math, sys
import os
import glob
from collections import Counter
import pandas as pd
### Define various objects for later use.
txtids = []
segids = []
filenames = []
binids = []
offset = sys.maxsize # used to track wrong segmenting (i.e. with segment numbering not starting with 0)
### Get filenames, text identifiers, segment identifiers.
for file in glob.glob(inpath):
filename = os.path.basename(file)[:-4]
txtid = filename[:6]
txtids.append(txtid)
segid = filename[-4:]
#print(filename, txtid, segid)
segids.append(segid)
offset = min(offset, int(segid))
#txtids_sr = pd.Series(txtids)
#segids_sr = pd.Series(segids)
if offset > 0:
print("Warning! Segment numbering should start at 0. Using offset: " + str(offset))
### For each text identifier, get number of segments.
txtids_ct = Counter(txtids)
sum_segnbs = 0
for txtid in txtids_ct:
segnb = txtids_ct[txtid]
#print(segnb)
sum_segnbs = sum_segnbs + segnb
#print(txtid, segnb)
print("Total number of segments: ", sum_segnbs)
for txtid in txtids_ct:
countsegs = txtids_ct[txtid]
if binsnb > int(countsegs):
print("Warning! You are expecting more bins than segments available! Bins will not be filled continuously!")
### Match each filename to the number of segments of the text.
bcount = dict()
for i in range(0, binsnb):
bcount[i] = 0
for file in glob.glob(inpath):
filename = os.path.basename(file)[:-4]
for txtid in txtids_ct:
if txtid in filename:
filename = filename + "$" + str(txtids_ct[txtid])
#print(filename)
### For each filename, compute and append bin number
txtid = filename[0:6]
segid = filename[7:11]
segnb = filename[12:]
#print(txtid,segid,segnb)
binid = ""
segprop = (int(segid) - offset) / int(segnb)
#print(txtid, segid, segnb, segprop)
binid = math.floor(segprop * binsnb)
if binid == binsnb: # avoid 1.0 beeing in seperate bin (should never happen due to offset!)
print("Error: Segment numbering is wrong! Continuing anyway...")
binid -= 1
bcount[binid] += 1
#print(segprop, binid)
filenames.append(filename[:11])
binids.append(binid)
filenames_sr = pd.Series(filenames, name="segmentID")
binids_sr = pd.Series(binids, name="binID")
files_and_bins = pd.concat([filenames_sr,binids_sr], axis=1)
print("chunks per bin: ", bcount)
if not os.path.exists(outfolder):
os.makedirs(outfolder)
outfile = outfolder+"segs-and-bins.csv"
with open(outfile, "w") as outfile:
files_and_bins.to_csv(outfile, index=False)
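# Worked illustration (never called by the workflow): the bin id of a segment is
# its relative position in the text, scaled to the number of bins. For a text
# with 10 segments and 5 bins this yields two segments per bin.
def _example_bin_assignment(segnb=10, binsnb=5, offset=0):
    import math
    assignments = []
    for segid in range(segnb):
        segprop = (segid - offset) / float(segnb)  # relative position, 0.0 <= x < 1.0
        binid = int(math.floor(segprop * binsnb))  # -> 0,0,1,1,2,2,3,3,4,4
        assignments.append(binid)
    return assignments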
#################################
# pretokenize #
#################################
import csv
def perform_multipleSubs(substitutionsFile, text):
"""Search and replace from a table of string pairs."""
## With code from http://stackoverflow.com/users/735204/emmett-j-butler
## Load table and turn into dict
with open(substitutionsFile, "r") as subsFile:
subs = csv.reader(subsFile)
subsDict = {rows[0]:rows[1] for rows in subs}
for key, value in subsDict.items():
text = re.sub(key, value, text)
#print(text)
return text
## Create a regular expression from the dictionary keys
#regex = re.compile("(%s)" % "|".join(map(re.escape, subsDict.keys())))
## For each match, look-up corresponding value in dictionary
#result = regex.sub(lambda mo: subsDict[mo.string[mo.start():mo.end()]], text)
#print(result)
def pretokenize(inpath, substitutionsFile, outfolder):
"""Deletion of unwanted elided and hyphenated words for better tokenization in TreeTagger. Optional."""
print("\nLaunched pretokenize.")
for file in glob.glob(inpath):
with open(file,"r") as text:
text = text.read()
text = perform_multipleSubs(substitutionsFile, text)
basename = os.path.basename(file)
cleanfilename = basename
if not os.path.exists(outfolder):
os.makedirs(outfolder)
with open(os.path.join(outfolder, cleanfilename),"w") as output:
output.write(text)
print("Done.")
#################################
# call_treetagger #
#################################
def call_treetagger(infolder, outfolder, tagger):
"""Function to call TreeTagger from Python"""
print("\nLaunched call_treetagger.")
import os
import glob
import subprocess
inpath = infolder + "*.txt"
infiles = glob.glob(inpath)
counter = 0
if not os.path.exists(outfolder):
os.makedirs(outfolder)
for infile in infiles:
#print(os.path.basename(infile))
counter+=1
outfile = outfolder + os.path.basename(infile)[:-4] + ".trt"
#print(outfile)
command = tagger + " < " + infile + " > " + outfile
subprocess.call(command, shell=True)
print("Files treated: ", counter)
print("Done.")
#################################
# make_lemmatext #
#################################
def make_lemmatext(inpath, outfolder, mode, stoplist_errors):
"""Function to extract lemmas from TreeTagger output."""
print("\nLaunched make_lemmatext.")
import re
import os
import glob
if not os.path.exists(outfolder):
os.makedirs(outfolder)
with open(stoplist_errors, "r") as infile:
        stoplist = infile.read().split()
counter = 0
for file in glob.glob(inpath):
#print(os.path.basename(file))
with open(file,"r") as infile:
counter+=1
text = infile.read()
splittext = re.split("\n",text)
lemmata = []
for line in splittext:
splitline = re.split("\t",line)
if len(splitline) == 3:
lemma = splitline[2]
pos = splitline[1]
token = splitline[0]
## Select subset of lemmas according to parameter "mode"
if mode == "frN":
if "|" in lemma:
lemmata.append(token.lower())
elif "NOM" in pos and "|" not in lemma and "<unknown>" not in lemma:
lemmata.append(lemma.lower())
elif mode == "frNV":
if "|" in lemma:
lemmata.append(token.lower())
elif "NOM" in pos or "VER" in pos and "|" not in lemma and "<unknown>" not in lemma:
lemmata.append(lemma.lower())
elif mode == "frNVAA":
if "|" in lemma:
lemmata.append(token.lower())
elif "NOM" in pos or "VER" in pos or "ADJ" in pos or "ADV" in pos and "|" not in lemma and "<unknown>" not in lemma:
lemmata.append(lemma.lower())
elif mode == "esN":
if "|" in lemma and "NC" in pos:
lemmata.append(token.lower())
elif "NC" in pos and "|" not in lemma and "<unknown>" not in lemma:
lemmata.append(lemma.lower())
elif mode == "enNV":
if "NN" in pos or "VB" in pos and "|" not in lemma and "<unknown>" not in lemma:
lemmata.append(lemma.lower())
elif mode == "enN":
if "NN" in pos and "|" not in lemma and "<unknown>" not in lemma:
lemmata.append(lemma.lower())
## Continue with list of lemmata, but remove undesired leftover words
lemmata = ' '.join([word for word in lemmata if word not in stoplist])
lemmata = re.sub("[ ]{1,4}"," ", lemmata)
newfilename = os.path.basename(file)[:-4] + ".txt"
#print(outfolder, newfilename)
with open(os.path.join(outfolder, newfilename),"w") as output:
output.write(str(lemmata))
print("Files treated: ", counter)
print("Done.")
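# Hypothetical call (paths and mode are assumptions): keep only French nouns and
# verbs from the TreeTagger output, dropping words listed in the error stoplist.
def _example_make_lemmatext():
    make_lemmatext("corpus/tagged/*.trt", "corpus/lemmata/",
                   mode="frNV", stoplist_errors="extras/stoplist-errors.txt")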
##################################################################
### TOPIC MODELLING WITH MALLET ###
##################################################################
# TODO: Concatenate two stoplists first, one for errors, one for deliberate ommissions.
#################################
# call_mallet_import #
#################################
def call_mallet_import(mallet_path, infolder,outfolder, outfile, stoplist_project):
"""Function to import text data into Mallet."""
print("\nLaunched call_mallet_import.")
import subprocess
import os
if not os.path.exists(outfolder):
os.makedirs(outfolder)
### Fixed parameters.
token_regex = "'\p{L}[\p{L}\p{P}]*\p{L}'"
### Building the command line command
command = mallet_path + " import-dir --input " + infolder + " --output " + outfile + " --keep-sequence --token-regex " + token_regex + " --remove-stopwords TRUE --stoplist-file " + stoplist_project
## Make the call
subprocess.call(command, shell=True)
print("Done.\n")
#################################
# call_mallet_modeling #
#################################
def call_mallet_modeling(mallet_path, inputfile,outfolder,numOfTopics,optimize_interval,num_iterations,num_top_words,doc_topics_max):
"""Function to perform topic modeling with Mallet."""
print("\nLaunched call_mallet_modeling.")
### Getting ready.
if not os.path.exists(outfolder):
os.makedirs(outfolder)
### Fixed parameters
word_topics_counts_file = outfolder + "words-by-topics.txt"
topic_word_weights_file = outfolder + "word-weights.txt"
output_topic_keys = outfolder + "topics-with-words.csv"
output_doc_topics = outfolder + "topics-in-texts.csv"
output_topic_state = outfolder + "topic_state.gz"
### Constructing Mallet command from parameters.
    command = mallet_path +" train-topics --input "+ inputfile +" --num-topics "+ numOfTopics +" --optimize-interval "+ optimize_interval +" --num-iterations " + num_iterations +" --num-top-words " + num_top_words +" --word-topic-counts-file "+ word_topics_counts_file + " --topic-word-weights-file "+ topic_word_weights_file +" --output-topic-keys "+ output_topic_keys +" --output-doc-topics "+ output_doc_topics +" --doc-topics-max "+ doc_topics_max + " --output-state " + output_topic_state
#print(command)
subprocess.call(command, shell=True)
print("Done.\n")
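# Hypothetical invocation (all paths and values are assumptions). Note that the
# numeric settings are passed as strings because the command line above is built
# by string concatenation.
def _example_mallet_run():
    call_mallet_modeling("/opt/mallet/bin/mallet", "corpus.mallet", "model/",
                         numOfTopics="50", optimize_interval="100",
                         num_iterations="2000", num_top_words="50",
                         doc_topics_max="50")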
##################################################################
### POSTPROCESSING OF RAW DATA ###
##################################################################
##############################
# create_mastermatrix #
##############################
import numpy as np
import pandas as pd
import os
import glob
def get_metadata(metadatafile):
    """Read metadata file and create DataFrame."""
    print(" Getting metadata...")
metadata = pd.DataFrame.from_csv(metadatafile, header=0, sep=",")
#print("metadata\n", metadata)
return metadata
def get_topicscores(topics_in_texts, numOfTopics):
"""Create a matrix of segments x topics, with topic score values, from Mallet output."""
print(" Getting topicscores...")
## Load Mallet output (strange format)
topicsintexts = pd.read_csv(topics_in_texts, header=None, skiprows=[0], sep="\t", index_col=0)
#topicsintexts = topicsintexts.iloc[0:100,] ### For testing only!!
#print("topicsintexts\n", topicsintexts.head())
listofsegmentscores = []
idnos = []
i = -1
## For each row, collect segment and idno
for row_index, row in topicsintexts.iterrows():
segment = row[1][-15:-4]
idno = row[1][-15:-11]
#print(segment, idno)
idnos.append(idno)
topics = []
scores = []
## For each segment, get the topic number and its score
i +=1
for j in range(1,numOfTopics,2):
k = j+1
topic = topicsintexts.iloc[i,j]
score = topicsintexts.iloc[i,k]
#score = round(score, 4) ## round off for smaller file.
topics.append(topic)
scores.append(score)
## Create dictionary of topics and scores for one segment
persegment = dict(zip(topics, scores))
segmentscores = pd.DataFrame.from_dict(persegment, orient="index")
segmentscores.columns = [segment]
segmentscores = segmentscores.T
listofsegmentscores.append(segmentscores)
## Putting it all together
topicscores = pd.concat(listofsegmentscores)
topicscores["segmentID"] = topicscores.index
topicscores.fillna(0,inplace=True)
#print("topicscores\n", topicscores)
return topicscores
def get_docmatrix(corpuspath):
"""Create a matrix containing segments with their idnos."""
print(" Getting docmatrix...")
## Create dataframe with filenames of segments and corresponding idnos.
segs = []
idnos = []
for file in glob.glob(corpuspath):
seg,ext = os.path.basename(file).split(".")
segs.append(seg)
idno = seg[0:6]
idnos.append(idno)
docmatrix = pd.DataFrame(segs)
docmatrix["idno"] = idnos
docmatrix.rename(columns={0:"segmentID"}, inplace=True)
#print("docmatrix\n", docmatrix)
return docmatrix
def merge_data(corpuspath, metadatafile, topics_in_texts, mastermatrixfile,
numOfTopics):
"""Merges the three dataframes into one mastermatrix."""
print(" Getting data...")
## Get all necessary data.
metadata = get_metadata(metadatafile)
docmatrix = get_docmatrix(corpuspath)
topicscores = get_topicscores(topics_in_texts, numOfTopics)
## For inspection only.
#print("Metadata\n", metadata.head())
#print("Docmatrix\n", docmatrix.head())
#print("topicscores\n", topicscores.head())
print(" Merging data...")
## Merge metadata and docmatrix, matching each segment to its metadata.
mastermatrix = pd.merge(docmatrix, metadata, how="inner", on="idno")
#print("mastermatrix: metadata and docmatrix\n", mastermatrix)
## Merge mastermatrix and topicscores, matching each segment to its topic scores.
#print(mastermatrix.columns)
#print(topicscores.columns)
#print(topicscores)
mastermatrix = pd.merge(mastermatrix, topicscores, on="segmentID", how="inner")
#print("mastermatrix: all three\n", mastermatrix.head())
return mastermatrix
def add_binData(mastermatrix, binDataFile):
print("- adding bin data...")
## Read the information about bins
binData = pd.read_csv(binDataFile, sep=",")
#print(binData)
## Merge existing mastermatrix and binData.
mastermatrix = pd.merge(mastermatrix, binData, how="inner", on="segmentID")
#print(mastermatrix)
return mastermatrix
def create_mastermatrix(corpuspath, outfolder, mastermatrixfile, metadatafile,
topics_in_texts, numOfTopics, useBins, binDataFile):
"""Builds the mastermatrix uniting all information about texts and topic scores."""
print("\nLaunched create_mastermatrix.")
print("(Warning: This is very memory-intensive and may take a while.)")
if not os.path.exists(outfolder):
os.makedirs(outfolder)
mastermatrix = merge_data(corpuspath, metadatafile, topics_in_texts,
mastermatrixfile, numOfTopics)
if useBins == True:
mastermatrix = add_binData(mastermatrix, binDataFile)
mastermatrix.to_csv(outfolder+mastermatrixfile, sep=",", encoding="utf-8")
print("Done. Saved mastermatrix. Segments and columns:", mastermatrix.shape)
################################
# calculate_averageTopicScores #
################################
def calculate_averageTopicScores(mastermatrixfile, targets, outfolder):
"""Function to calculate average topic scores based on the mastermatrix."""
print("\nLaunched calculate_averageTopicScores.")
if not os.path.exists(outfolder):
os.makedirs(outfolder)
with open(mastermatrixfile, "r") as infile:
mastermatrix = pd.DataFrame.from_csv(infile, header=0, sep=",")
## Calculate average topic scores for each target category
for target in targets:
grouped = mastermatrix.groupby(target, axis=0)
avg_topicscores = grouped.agg(np.mean)
if target != "year":
avg_topicscores = avg_topicscores.drop(["year"], axis=1)
if target != "binID":
avg_topicscores = avg_topicscores.drop(["binID"], axis=1)
#avg_topicscores = avg_topicscores.drop(["tei"], axis=1)
## Save grouped averages to CSV file for visualization.
resultfilename = "avgtopicscores_by-"+target+".csv"
resultfilepath = outfolder+resultfilename
## TODO: Some reformatting here, or adapt make_heatmaps.
avg_topicscores.to_csv(resultfilepath, sep=",", encoding="utf-8")
print(" Saved average topic scores for:", target)
print("Done.")
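# Toy illustration with synthetic data (not part of the workflow): averaging
# topic scores per metadata category is exactly the groupby/agg call used above.
def _example_average_by_author():
    toy = pd.DataFrame({"author": ["A", "A", "B"],
                        "0": [0.1, 0.3, 0.2],
                        "1": [0.5, 0.1, 0.4]})
    return toy.groupby("author").agg(np.mean)  # mean score of each topic per author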
################################
# complexAverageTopicScores #
################################
def calculate_complexAverageTopicScores(mastermatrixfile, targets, outfolder):
"""Function to calculate average topic scores based on the mastermatrix."""
print("\nLaunched calculate_complexAverageTopicScores.")
if not os.path.exists(outfolder):
os.makedirs(outfolder)
with open(mastermatrixfile, "r") as infile:
mastermatrix = pd.DataFrame.from_csv(infile, header=0, sep=",")
## Calculate average topic scores for each target category
grouped = mastermatrix.groupby(targets, axis=0)
avg_topicscores = grouped.agg(np.mean)
if "year" not in targets:
avg_topicscores = avg_topicscores.drop(["year"], axis=1)
if "binID" not in targets:
avg_topicscores = avg_topicscores.drop(["binID"], axis=1)
#print(avg_topicscores)
## Save grouped averages to CSV file for visualization.
identifierstring = '+'.join(map(str, targets))
resultfilename = "complex-avgtopicscores_by-"+identifierstring+".csv"
resultfilepath = outfolder+resultfilename
avg_topicscores.to_csv(resultfilepath, sep=",", encoding="utf-8")
print("Done. Saved average topic scores for: "+identifierstring)
#################################
# save_firstWords #
#################################
def save_firstWords(topicWordFile, outfolder, filename):
"""Save a table of topics with their three most important words for each topic."""
    print("Launched save_firstWords.")
with open(topicWordFile, "r") as infile:
firstWords = {}
topicWords = pd.read_csv(infile, sep="\t", header=None)
topicWords = topicWords.drop(1, axis=1)
topicWords = topicWords.iloc[:,1:2]
topics = topicWords.index.tolist()
words = []
for topic in topics:
topic = int(topic)
row = topicWords.loc[topic]
row = row[2].split(" ")
row = str(row[0]+"-"+row[1]+"-"+row[2]+" ("+str(topic)+")")
words.append(row)
firstWords = dict(zip(topics, words))
firstWordsSeries = pd.Series(firstWords, name="firstWords")
#firstWordsSeries.index.name = "topic"
#firstWordsSeries = firstWordsSeries.rename(columns = {'two':'new_name'})
firstWordsSeries.reindex_axis(["firstwords"])
#print(firstWordsSeries)
## Saving the file.
if not os.path.exists(outfolder):
os.makedirs(outfolder)
outfile = outfolder + filename
with open(outfile, "w") as outfile:
firstWordsSeries.to_csv(outfile)
print("Done.")
#################################
# save_topicRanks #
#################################
def save_topicRanks(topicWordFile, outfolder, filename):
"""Save a list of topics with their rank by topic score."""
print("Launched save_topicRanks.")
with open(topicWordFile, "r") as infile:
topicRanks = pd.read_csv(infile, sep="\t", header=None)
topicRanks = topicRanks.drop(2, axis=1)
topicRanks.rename(columns={0:"Number"}, inplace=True)
topicRanks.rename(columns={1:"Score"}, inplace=True)
#topicRanks.sort(columns=["Score"], ascending=False, inplace=True)
topicRanks["Rank"] = topicRanks["Score"].rank(ascending=False)
#print(topicRanks.head())
## Saving the file.
if not os.path.exists(outfolder):
os.makedirs(outfolder)
outfile = outfolder + filename
with open(outfile, "w") as outfile:
topicRanks.to_csv(outfile)
print("Done.")
##################################################################
### VISUALIZATION ###
##################################################################
import matplotlib.pyplot as plt
#################################
# make_wordle_from_mallet #
#################################
def make_wordle_from_mallet(word_weights_file,topics,words,outfolder,
font_path, dpi):
"""Generate wordles from Mallet output, using the wordcloud module."""
print("\nLaunched make_wordle_from_mallet.")
from wordcloud import WordCloud
import random
if not os.path.exists(outfolder):
os.makedirs(outfolder)
def read_mallet_output(word_weights_file):
"""Reads Mallet output (topics with words and word weights) into dataframe."""
word_scores = pd.read_table(word_weights_file, header=None, sep="\t")
word_scores = word_scores.sort(columns=[0,2], axis=0, ascending=[True, False])
word_scores_grouped = word_scores.groupby(0)
#print(word_scores.head())
return word_scores_grouped
def get_wordlewords(words, word_weights_file, topic):
"""Transform Mallet output for wordle generation."""
topic_word_scores = read_mallet_output(word_weights_file).get_group(topic)
top_topic_word_scores = topic_word_scores.iloc[0:words]
topic_words = top_topic_word_scores.loc[:,1].tolist()
word_scores = top_topic_word_scores.loc[:,2].tolist()
wordlewords = ""
j = 0
for word in topic_words:
word = word
score = word_scores[j]
j += 1
wordlewords = wordlewords + ((word + " ") * score)
return wordlewords
def get_color_scale(word, font_size, position, orientation, font_path, random_state=None):
""" Create color scheme for wordle."""
return "hsl(245, 58%, 25%)" # Default. Uniform dark blue.
#return "hsl(0, 00%, %d%%)" % random.randint(80, 100) # Greys for black background.
#return "hsl(221, 65%%, %d%%)" % random.randint(30, 35) # Dark blues for white background
def get_topicRank(topic, topicRanksFile):
#print("getting topic rank.")
with open(topicRanksFile, "r") as infile:
topicRanks = pd.read_csv(infile, sep=",", index_col=0)
rank = int(topicRanks.iloc[topic]["Rank"])
return rank
def make_wordle_from_mallet(word_weights_file,
numOfTopics,words,outfolder,
topicRanksFile,
font_path, dpi):
"""Generate wordles from Mallet output, using the wordcloud module."""
print("\nLaunched make_wordle_from_mallet.")
for topic in range(0,numOfTopics):
## Gets the text for one topic.
text = get_wordlewords(words, word_weights_file, topic)
wordcloud = WordCloud(font_path=font_path, width=600, height=400, background_color="white", margin=4).generate(text)
default_colors = wordcloud.to_array()
rank = get_topicRank(topic, topicRanksFile)
figure_title = "topic "+ str(topic) + " ("+str(rank)+"/"+str(numOfTopics)+")"
plt.imshow(wordcloud.recolor(color_func=get_color_scale, random_state=3))
plt.imshow(default_colors)
plt.imshow(wordcloud)
plt.title(figure_title, fontsize=30)
plt.axis("off")
## Saving the image file.
if not os.path.exists(outfolder):
os.makedirs(outfolder)
figure_filename = "wordle_tp"+"{:03d}".format(topic) + ".png"
plt.savefig(outfolder + figure_filename, dpi=dpi)
plt.close()
print("Done.")
def crop_images(inpath, outfolder, left, upper, right, lower):
""" Function to crop wordle files."""
print("Launched crop_images.")
from PIL import Image
import glob
import os
counter = 0
for file in glob.glob(inpath):
original = Image.open(file)
filename = os.path.basename(file)[:-4]+"x.png"
box = (left, upper, right, lower)
cropped = original.crop(box)
cropped.save(outfolder + filename)
counter +=1
print("Done. Images cropped:" , counter)
#################################
# plot_topTopics #
#################################
# TODO: Move this one one level up if several plotting functions use it.
def get_firstWords(firstWordsFile):
"""Function to load list of top topic words into dataframe."""
#print(" Getting firstWords.")
with open(firstWordsFile, "r") as infile:
firstWords = pd.read_csv(infile, header=None)
firstWords.drop(0, axis=1, inplace=True)
firstWords.rename(columns={1:"topicwords"}, inplace=True)
#print(firstWords)
return(firstWords)
def get_targetItems(average, targetCategory):
"""Get a list of items included in the target category."""
print(" Getting targetItems for: "+targetCategory)
with open(average, "r") as infile:
averageTopicScores = pd.DataFrame.from_csv(infile, sep=",")
#print(averageTopicScores.head())
targetItems = list(averageTopicScores.index.values)
#print(targetItems)
return(targetItems)
def get_dataToPlot(average, firstWordsFile, mode, topTopicsShown, item):
"""From average topic score data, select data to be plotted."""
#print(" Getting dataToPlot.")
with open(average, "r") as infile:
## Read the average topic score data
allData = pd.DataFrame.from_csv(infile, sep=",")
if mode == "normalized": # mean normalization
colmeans = allData.mean(axis=0)
allData = allData / colmeans
elif mode == "zscores": # zscore transformation
colmeans = allData.mean(axis=0) # ???
colstd = allData.std(axis=0) #std for each topic
allData = (allData - colmeans) / colstd # = zscore transf.
elif mode == "absolute": # absolute values
allData = allData
allData = allData.T
## Add top topic words to table for display later
firstWords = get_firstWords(firstWordsFile)
allData["firstWords"] = firstWords.iloc[:,0].values
## Create subset of data based on target.
dataToPlot = allData[[item,"firstWords"]]
dataToPlot = dataToPlot.sort(columns=item, ascending=False)
dataToPlot = dataToPlot[0:topTopicsShown]
dataToPlot = dataToPlot.set_index("firstWords")
#print(dataToPlot)
return dataToPlot
def create_barchart_topTopics(dataToPlot, targetCategory, mode, item,
fontscale, height, dpi, outfolder):
"""Function to make a topTopics barchart."""
print(" Creating plot for: "+str(item))
## Doing the plotting.
dataToPlot.plot(kind="bar", legend=None)
plt.setp(plt.xticks()[1], rotation=90, fontsize = 11)
if mode == "normalized":
plt.title("Top-distinctive Topics für: "+str(item), fontsize=15)
plt.ylabel("normalized scores", fontsize=13)
elif mode == "absolute":
plt.title("Top-wichtigste Topics für: "+str(item), fontsize=15)
plt.ylabel("absolute scores", fontsize=13)
plt.xlabel("Topics", fontsize=13)
plt.tight_layout()
if height != 0:
plt.ylim((0.000,height))
## Saving the plot to disk.
outfolder = outfolder+targetCategory+"/"
if not os.path.exists(outfolder):
os.makedirs(outfolder)
figure_filename = outfolder+"tT_"+mode+"-"+str(item)+".png"
plt.savefig(figure_filename, dpi=dpi)
plt.close()
def plot_topTopics(averageDatasets, firstWordsFile, numOfTopics,
targetCategories, mode, topTopicsShown, fontscale,
height, dpi, outfolder):
"""For each item in a category, plot the top n topics as a barchart."""
print("Launched plot_topTopics.")
for average in glob.glob(averageDatasets):
for targetCategory in targetCategories:
if targetCategory in average:
targetItems = get_targetItems(average, targetCategory)
for item in targetItems:
dataToPlot = get_dataToPlot(average, firstWordsFile, mode, topTopicsShown, item)
create_barchart_topTopics(dataToPlot, targetCategory, mode, item, fontscale, height, dpi, outfolder)
print("Done.")
#################################
# plot_topItems #
#################################
def get_topItems_firstWords(firstWordsFile, topic):
"""Function to load list of top topic words into dataframe."""
#print(" Getting firstWords.")
with open(firstWordsFile, "r") as infile:
firstWords = pd.DataFrame.from_csv(infile, header=None)
firstWords.columns = ["firstWords"]
# Only the words for one topic are needed.
firstWords = firstWords.iloc[topic]
firstWords = firstWords[0]
return(firstWords)
def get_topItems_dataToPlot(average, firstWordsFile, topItemsShown, topic):
"""From average topic score data, select data to be plotted."""
#print(" Getting dataToPlot.")
with open(average, "r") as infile:
## Read the average topic score data
allData = pd.DataFrame.from_csv(infile, sep=",")
allData = allData.T
## Create subset of data based on target.
dataToPlot = allData.iloc[topic,:]
dataToPlot = dataToPlot.order(ascending=False)
dataToPlot = dataToPlot[0:topItemsShown]
#print(dataToPlot)
return dataToPlot
def create_topItems_barchart(dataToPlot, firstWords, targetCategory, topic,
fontscale, height, dpi, outfolder):
"""Function to make a topItems barchart."""
print(" Creating plot for topic: "+str(topic))
## Doing the plotting.
dataToPlot.plot(kind="bar", legend=None)
plt.title("Top "+targetCategory+" für topic: "+str(firstWords), fontsize=15)
plt.ylabel("Scores", fontsize=13)
plt.xlabel(targetCategory, fontsize=13)
plt.setp(plt.xticks()[1], rotation=90, fontsize = 11)
if height != 0:
plt.ylim((0.000,height))
plt.tight_layout()
## Saving the plot to disk.
outfolder = outfolder+targetCategory+"/"
if not os.path.exists(outfolder):
os.makedirs(outfolder)
figure_filename = outfolder+"tI_by-"+targetCategory+"-{:03d}".format(topic)+".png"
plt.savefig(figure_filename, dpi=dpi)
plt.close()
def plot_topItems(averageDatasets,
outfolder,
firstWordsFile,
numOfTopics,
targetCategories,
topItemsShown,
fontscale,
height,
dpi):
"""Visualize topic score distribution data as barchart. """
print("Launched plot_topItems")
for average in glob.glob(averageDatasets):
for targetCategory in targetCategories:
if targetCategory in average:
print(" Plotting for: "+targetCategory)
topics = list(range(0,numOfTopics))
for topic in topics:
firstWords = get_topItems_firstWords(firstWordsFile,
topic)
dataToPlot = get_topItems_dataToPlot(average,
firstWordsFile,
topItemsShown,
topic)
create_topItems_barchart(dataToPlot,
firstWords,
targetCategory,
topic,
fontscale,
height,
dpi,
outfolder)
print("Done.")
#################################
# topic_distribution_heatmap #
#################################
import seaborn as sns
# TODO: This next function could be merged with above.
def get_heatmap_firstWords(firstWordsFile):
"""Function to load list of top topic words into dataframe."""
#print(" Getting firstWords.")
with open(firstWordsFile, "r") as infile:
firstWords = pd.read_csv(infile, header=None)
firstWords.drop(0, axis=1, inplace=True)
firstWords.rename(columns={1:"topicwords"}, inplace=True)
#print(firstWords)
return(firstWords)
def get_heatmap_dataToPlot(average, mode, sorting, firstWordsFile,
                           topTopicsShown, numOfTopics):
"""From average topic score data, select data to be plotted."""
print("- getting dataToPlot...")
with open(average, "r") as infile:
## Read the average topic score data
allScores = pd.DataFrame.from_csv(infile, sep=",")
if mode == "normalized": # mean normalization
colmeans = allScores.mean(axis=0)
allScores = allScores / colmeans
elif mode == "zscores": # zscore transformation
colmeans = allScores.mean(axis=0) # mean for each topic
allstd = allScores.std(axis=0) #std for entire df
allScores = (allScores - colmeans) / allstd # = zscore transf.
elif mode == "absolute": # absolute values
allScores = allScores
allScores = allScores.T
## Add top topic words to table for display later
firstWords = get_heatmap_firstWords(firstWordsFile)
allScores.index = allScores.index.astype(np.int64)
allScores = pd.concat([allScores, firstWords], axis=1, join="inner")
#print(allScores)
## Remove undesired columns: subsubgenre
#allScores = allScores.drop("adventure", axis=1)
#allScores = allScores.drop("autobiographical", axis=1)
#allScores = allScores.drop("blanche", axis=1)
#allScores = allScores.drop("education", axis=1)
#allScores = allScores.drop("fantastic", axis=1)
#allScores = allScores.drop("fantastique", axis=1)
#allScores = allScores.drop("historical", axis=1)
#allScores = allScores.drop("n.av.", axis=1)
#allScores = allScores.drop("nouveau-roman", axis=1)
#allScores = allScores.drop("sciencefiction", axis=1)
#allScores = allScores.drop("social", axis=1)
#allScores = allScores.drop("other", axis=1)
#allScores = allScores.drop("espionnage", axis=1)
#allScores = allScores.drop("thriller", axis=1)
#allScores = allScores.drop("neopolar", axis=1)
## Remove undesired columns: protagonist-policier
#allScores = allScores.drop("crminal", axis=1)
#allScores = allScores.drop("mixed", axis=1)
#allScores = allScores.drop("witness", axis=1)
#allScores = allScores.drop("criminel", axis=1)
#allScores = allScores.drop("detection", axis=1)
#allScores = allScores.drop("victime", axis=1)
#allScores = allScores.drop("n.av.", axis=1)
## Sort by standard deviation
standardDeviations = allScores.std(axis=1)
standardDeviations.name = "std"
allScores.index = allScores.index.astype(np.int64)
allScores = pd.concat([allScores, standardDeviations], axis=1)
allScores = allScores.sort(columns="std", axis=0, ascending=False)
allScores = allScores.drop("std", axis=1)
someScores = allScores[0:topTopicsShown]
someScores = someScores.drop(0, axis=1)
## Necessary step to align dtypes of indexes for concat.
someScores.index = someScores.index.astype(np.int64)
#print("dtype firstWords: ", type(firstWords.index))
#print("dtype someScores: ", type(someScores.index))
#print("\n==intersection==\n",someScores.index.intersection(firstWords.index))
## Add top topic words to table for display later
firstWords = get_heatmap_firstWords(firstWordsFile)
dataToPlot = pd.concat([someScores, firstWords], axis=1, join="inner")
dataToPlot = dataToPlot.set_index("topicwords")
#print(dataToPlot)
## Optionally, limit display to part of the columns
#dataToPlot = dataToPlot.iloc[:,0:40]
#print(dataToPlot)
return dataToPlot
def create_distinctiveness_heatmap(dataToPlot,
topTopicsShown,
targetCategory,
mode,
sorting,
fontscale,
dpi,
outfolder):
sns.set_context("poster", font_scale=fontscale)
sns.heatmap(dataToPlot, annot=False, cmap="YlOrRd", square=False)
# Nice: bone_r, copper_r, PuBu, OrRd, GnBu, BuGn, YlOrRd
plt.title("Verteilung der Topic Scores", fontsize=20)
plt.xlabel(targetCategory, fontsize=16)
plt.ylabel("Top topics (stdev)", fontsize=16)
plt.setp(plt.xticks()[1], rotation=90, fontsize = 12)
plt.tight_layout()
## Saving the plot to disk.
if not os.path.exists(outfolder):
os.makedirs(outfolder)
figure_filename = outfolder+"dist-heatmap_by-"+str(targetCategory)+".png"
plt.savefig(figure_filename, dpi=dpi)
plt.close()
def plot_distinctiveness_heatmap(averageDatasets,
firstWordsFile,
mode,
sorting,
outfolder,
targetCategories,
numOfTopics,
topTopicsShown,
fontscale,
dpi):
"""Visualize topic score distribution data as heatmap. """
print("Launched plot_distinctiveness_heatmap.")
for average in glob.glob(averageDatasets):
for targetCategory in targetCategories:
if targetCategory in average and targetCategory != "segmentID":
print("- working on: "+targetCategory)
dataToPlot = get_heatmap_dataToPlot(average,
mode,
sorting,
firstWordsFile,
topTopicsShown,
numOfTopics)
create_distinctiveness_heatmap(dataToPlot,
topTopicsShown,
targetCategory,
mode,
sorting,
fontscale,
dpi,
outfolder)
print("Done.")
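# Small numeric illustration (synthetic data): the "zscores" mode above centres
# each topic at 0 and scales it by its standard deviation before plotting.
def _example_zscore_mode():
    scores = pd.DataFrame({"t0": [0.1, 0.2, 0.3], "t1": [0.4, 0.4, 0.7]})
    return (scores - scores.mean(axis=0)) / scores.std(axis=0)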
#################################
# plot_topicsOverTime #
#################################
def get_overTime_firstWords(firstWordsFile):
"""Function to load list of top topic words into dataframe."""
#print(" Getting firstWords.")
with open(firstWordsFile, "r") as infile:
firstWords = pd.read_csv(infile, header=None)
firstWords.drop(0, axis=1, inplace=True)
firstWords.rename(columns={1:"topicwords"}, inplace=True)
firstWords.index = firstWords.index.astype(np.int64)
#print(firstWords)
return(firstWords)
def get_overTime_dataToPlot(average, firstWordsFile, entriesShown, topics):
"""Function to build a dataframe with all data necessary for plotting."""
#print(" Getting data to plot.")
with open(average, "r") as infile:
allScores = pd.DataFrame.from_csv(infile, sep=",")
allScores = allScores.T
#print(allScores.head())
## Select the data for selected topics
someScores = allScores.loc[topics,:]
someScores.index = someScores.index.astype(np.int64)
## Add information about the firstWords of topics
firstWords = get_overTime_firstWords(firstWordsFile)
dataToPlot = pd.concat([someScores, firstWords], axis=1, join="inner")
dataToPlot = dataToPlot.set_index("topicwords")
dataToPlot = dataToPlot.T
#print(dataToPlot)
return dataToPlot
def create_overTime_lineplot(dataToPlot, outfolder, fontscale, topics, dpi, height):
"""This function does the actual plotting and saving to disk."""
print(" Creating lineplot for selected topics.")
## Plot the selected data
dataToPlot.plot(kind="line", lw=3, marker="o")
plt.title("Entwicklung der Topic Scores", fontsize=20)
plt.ylabel("Topic scores (absolut)", fontsize=16)
plt.xlabel("Jahrzehnte", fontsize=16)
plt.setp(plt.xticks()[1], rotation=0, fontsize = 14)
if height != 0:
plt.ylim((0.000,height))
## Saving the plot to disk.
if not os.path.exists(outfolder):
os.makedirs(outfolder)
## Format the topic information for display
topicsLabel = "-".join(str(topic) for topic in topics)
figure_filename = outfolder+"lineplot-"+topicsLabel+".png"
plt.savefig(figure_filename, dpi=dpi)
plt.close()
def create_overTime_areaplot(dataToPlot, outfolder, fontscale, topics, dpi):
"""This function does the actual plotting and saving to disk."""
print(" Creating areaplot for selected topics.")
## Turn absolute data into percentages.
dataToPlot = dataToPlot.apply(lambda c: c / c.sum() * 100, axis=1)
## Plot the selected data
dataToPlot.plot(kind="area")
plt.title("Entwicklung der Topic Scores", fontsize=20)
plt.ylabel("Topic scores (anteilig zueinander)", fontsize=16)
plt.xlabel("Jahrzehnte", fontsize=16)
plt.ylim((0,100))
plt.setp(plt.xticks()[1], rotation=0, fontsize = 14)
## Saving the plot to disk.
if not os.path.exists(outfolder):
os.makedirs(outfolder)
## Format the topic information for display
topicsLabel = "-".join(str(topic) for topic in topics)
figure_filename = outfolder+"areaplot-"+topicsLabel+".png"
plt.savefig(figure_filename, dpi=dpi)
plt.close()
def plot_topicsOverTime(averageDatasets, firstWordsFile, outfolder,
numOfTopics, fontscale, dpi, height,
mode, topics):
"""Function to plot development of topics over time using lineplots or areaplots."""
print("Launched plot_topicsOverTime.")
if mode == "line":
for average in glob.glob(averageDatasets):
if "decade" in average:
entriesShown = numOfTopics
dataToPlot = get_overTime_dataToPlot(average, firstWordsFile,
entriesShown, topics)
create_overTime_lineplot(dataToPlot, outfolder, fontscale,
topics, dpi, height)
elif mode == "area":
for average in glob.glob(averageDatasets):
if "decade" in average:
entriesShown = numOfTopics
dataToPlot = get_overTime_dataToPlot(average, firstWordsFile,
entriesShown, topics)
create_overTime_areaplot(dataToPlot, outfolder, fontscale,
topics, dpi)
print("Done.")
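# Hypothetical call (file paths and topic numbers are assumptions): draw a line
# plot of topics 3, 17 and 24 across decades from the by-decade averages.
def _example_topics_over_time():
    plot_topicsOverTime("aggregates/avgtopicscores_by-decade.csv",
                        "aggregates/firstWords.csv", "plots/",
                        numOfTopics=250, fontscale=1.0, dpi=300, height=0,
                        mode="line", topics=[3, 17, 24])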
###########################
## topicClustering ###
###########################
# TOOD: Add figsize and orientation parameters.
# TODO: Add "firstwords" as leaf labels instead of topic numbers.
import scipy.cluster as sc
def get_topWordScores(wordWeightsFile, WordsPerTopic):
"""Reads Mallet output (topics with words and word weights) into dataframe."""
print("- getting topWordScores...")
wordScores = pd.read_table(wordWeightsFile, header=None, sep="\t")
    wordScores = wordScores.sort_values(by=[0, 2], axis=0, ascending=[True, False])
topWordScores = wordScores.groupby(0).head(WordsPerTopic)
#print(topWordScores)
return topWordScores
def build_scoreMatrix(topWordScores, topicsToUse):
"""Transform Mallet output for wordle generation."""
print("- building score matrix...")
topWordScores = topWordScores.groupby(0)
listOfWordScores = []
for topic,data in topWordScores:
if topic in list(range(0,topicsToUse)):
words = data.loc[:,1].tolist()
scores = data.loc[:,2].tolist()
wordScores = dict(zip(words, scores))
wordScores = pd.Series(wordScores, name=topic)
listOfWordScores.append(wordScores)
scoreMatrix = pd.concat(listOfWordScores, axis=1)
scoreMatrix = scoreMatrix.fillna(10)
#print(scoreMatrix.head)
scoreMatrix = scoreMatrix.T
return scoreMatrix
def perform_topicClustering(scoreMatrix, method, metric, wordsPerTopic, outfolder):
print("- performing clustering...")
distanceMatrix = sc.hierarchy.linkage(scoreMatrix, method=method, metric=metric)
#print(distanceMatrix)
plt.figure(figsize=(25,10))
sc.hierarchy.dendrogram(distanceMatrix)
plt.setp(plt.xticks()[1], rotation=90, fontsize = 6)
plt.title("Topic-Clustering Dendrogramm", fontsize=20)
plt.ylabel("Distanz", fontsize=16)
plt.xlabel("Parameter: "+method+" clustering - "+metric+" distance - "+str(wordsPerTopic)+" words", fontsize=16)
plt.tight_layout()
## Saving the image file.
if not os.path.exists(outfolder):
os.makedirs(outfolder)
figure_filename = "topic-clustering_"+metric+"-"+method+"-"+str(wordsPerTopic)+"words"+".png"
plt.savefig(outfolder + figure_filename, dpi=600)
plt.close()
def topicClustering(wordWeightsFile, wordsPerTopic, outfolder,
methods, metrics, topicsToUse):
"""Display dendrogram of topic similarity using clustering."""
print("\nLaunched topicClustering.")
## Gets the necessary data: the word scores for each topic
topWordScores = get_topWordScores(wordWeightsFile, wordsPerTopic)
## Turn the data into a dataframe for further processing
scoreMatrix = build_scoreMatrix(topWordScores, topicsToUse)
## Do clustering on the dataframe
for method in methods:
for metric in metrics:
perform_topicClustering(scoreMatrix, method, metric, wordsPerTopic, outfolder)
print("Done.")
###########################
## itemClustering ###
###########################
# TODO: Add orientation to parameters.
import scipy.cluster as sc
def build_itemScoreMatrix(averageDatasets, targetCategory,
topicsPerItem, sortingCriterium):
"""Reads Mallet output (topics with words and word weights) into dataframe."""
print("- getting topWordScores...")
for averageFile in glob.glob(averageDatasets):
if targetCategory in averageFile:
itemScores = pd.read_table(averageFile, header=0, index_col=0, sep=",")
itemScores = itemScores.T
if sortingCriterium == "std":
itemScores["sorting"] = itemScores.std(axis=1)
elif sortingCriterium == "mean":
itemScores["sorting"] = itemScores.mean(axis=1)
itemScores = itemScores.sort(columns=["sorting"], axis=0, ascending=False)
itemScoreMatrix = itemScores.iloc[0:topicsPerItem,0:-1]
itemScoreMatrix = itemScoreMatrix.T
"""
itemScoreMatrix = itemScoreMatrix.drop("Allais", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Audoux", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Barbara", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Barjavel", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Beckett", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Bernanos", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Bosco", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Bourget", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Butor", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Camus", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Carco", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Celine", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Colette", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Darien", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Daudet", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Delly", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Dombre", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Duras", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("ErckChat", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("FevalPP", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("MduGard", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Mirbeau", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Ohnet", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Perec", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Proust", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Queneau", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Rodenbach", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Rolland", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Roussel", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("SaintExupery", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Sand", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Aimard", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("AimardAuriac", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Balzac", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Bon", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Echenoz", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Flaubert", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Fleuriot", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("France", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Galopin", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Gary", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("GaryAjar", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("GaryBogat", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("GarySinibaldi", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Gautier", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Giono", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Gouraud", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Huysmans", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Hugo", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("LeClezio", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Loti", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Malot", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Mary", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Maupassant", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Modiano", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("RobbeGrillet", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Stolz", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Sue", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Tournier", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Verne", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Vian", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("VianSullivan", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Zola", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Malraux", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Simon", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("LeRouge", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("LeRougeGuitton", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Toussaint", axis=0)
itemScoreMatrix = itemScoreMatrix.drop("Khadra", axis=0)
"""
#print(itemScoreMatrix)
return itemScoreMatrix
def perform_itemClustering(itemScoreMatrix, targetCategory, method, metric,
topicsPerItem, sortingCriterium, figsize, outfolder):
print("- performing clustering...")
## Perform the actual clustering
itemDistanceMatrix = sc.hierarchy.linkage(itemScoreMatrix, method=method, metric=metric)
## Plot the distance matrix as a dendrogram
    plt.figure(figsize=figsize)
itemLabels = itemScoreMatrix.index.values
sc.hierarchy.dendrogram(itemDistanceMatrix, labels=itemLabels, orientation="top")
## Format items labels to x-axis tick labels
plt.setp(plt.xticks()[1], rotation=90, fontsize = 14)
plt.title("Item Clustering Dendrogramm: "+targetCategory, fontsize=20)
plt.ylabel("Distance", fontsize=16)
plt.xlabel("Parameter: "+method+" clustering - "+metric+" distance - "+str(topicsPerItem)+" topics", fontsize=16)
plt.tight_layout()
## Save the image file.
print("- saving image file.")
if not os.path.exists(outfolder):
os.makedirs(outfolder)
figure_filename = "item-clustering_"+targetCategory+"_"+metric+"-"+method+"-"+sortingCriterium+"-"+str(topicsPerItem)+"topics"+".jpg"
plt.savefig(outfolder + figure_filename, dpi=600)
plt.close()
def itemClustering(averageDatasets, figsize, outfolder, topicsPerItem,
targetCategories, methods, metrics, sortingCriterium):
"""Display dendrogram of topic-based item similarity using clustering."""
print("\nLaunched itemClustering.")
for targetCategory in targetCategories:
        ## Load topic scores per item and turn into score matrix
itemScoreMatrix = build_itemScoreMatrix(averageDatasets, targetCategory,
topicsPerItem, sortingCriterium)
## Do clustering on the dataframe
for method in methods:
for metric in metrics:
perform_itemClustering(itemScoreMatrix, targetCategory,
method, metric, topicsPerItem,
sortingCriterium, figsize, outfolder)
print("Done.")
###########################
## simple progression ###
###########################
def get_progression_firstWords(firstWordsFile):
"""Function to load list of top topic words into dataframe."""
#print(" Getting firstWords.")
with open(firstWordsFile, "r") as infile:
firstWords = pd.read_csv(infile, header=None)
firstWords.drop(0, axis=1, inplace=True)
firstWords.rename(columns={1:"topicwords"}, inplace=True)
firstWords.index = firstWords.index.astype(np.int64)
#print(firstWords)
return(firstWords)
def get_selSimpleProgression_dataToPlot(averageDataset, firstWordsFile,
entriesShown, topics):
"""Function to build a dataframe with all data necessary for plotting."""
print("- getting data to plot...")
with open(averageDataset, "r") as infile:
        allScores = pd.read_csv(infile, sep=",", index_col=0)
allScores = allScores.T
#print(allScores.head())
## Select the data for selected topics
someScores = allScores.loc[topics,:]
someScores.index = someScores.index.astype(np.int64)
## Add information about the firstWords of topics
firstWords = get_progression_firstWords(firstWordsFile)
dataToPlot = pd.concat([someScores, firstWords], axis=1, join="inner")
dataToPlot = dataToPlot.set_index("topicwords")
dataToPlot = dataToPlot.T
#print(dataToPlot)
return dataToPlot
def create_selSimpleProgression_lineplot(dataToPlot, outfolder, fontscale,
topics, dpi, height):
"""This function does the actual plotting and saving to disk."""
print("- creating the plot...")
## Plot the selected data
dataToPlot.plot(kind="line", lw=3, marker="o")
plt.title("Entwicklung ausgewählter Topics über den Textverlauf", fontsize=20)
plt.ylabel("Topic scores (absolut)", fontsize=16)
plt.xlabel("Textabschnitte", fontsize=16)
plt.setp(plt.xticks()[1], rotation=0, fontsize = 14)
if height != 0:
plt.ylim((0.000,height))
## Saving the plot to disk.
if not os.path.exists(outfolder):
os.makedirs(outfolder)
## Format the topic information for display
topicsLabel = "-".join(str(topic) for topic in topics)
figure_filename = outfolder+"sel_"+topicsLabel+".png"
plt.savefig(figure_filename, dpi=dpi)
plt.close()
def get_allSimpleProgression_dataToPlot(averageDataset, firstWordsFile,
entriesShown, topic):
"""Function to build a dataframe with all data necessary for plotting."""
print("- getting data to plot...")
with open(averageDataset, "r") as infile:
        allScores = pd.read_csv(infile, sep=",", index_col=0)
allScores = allScores.T
#print(allScores)
## Select the data for current topics
someScores = allScores.loc[topic,:]
someScores.index = someScores.index.astype(np.int64)
dataToPlot = someScores
#print(dataToPlot)
return dataToPlot
# TODO: Make sure this is only read once and then select when plotting.
def create_allSimpleProgression_lineplot(dataToPlot, outfolder, fontscale,
firstWordsFile, topic, dpi, height):
"""This function does the actual plotting and saving to disk."""
print("- creating the plot for topic " + topic)
## Get the first words info for the topic
firstWords = get_progression_firstWords(firstWordsFile)
topicFirstWords = firstWords.iloc[int(topic),0]
#print(topicFirstWords)
## Plot the selected data
dataToPlot.plot(kind="line", lw=3, marker="o")
plt.title("Entwicklung über den Textverlauf für "+topicFirstWords, fontsize=20)
plt.ylabel("Topic scores (absolut)", fontsize=16)
plt.xlabel("Textabschnitte", fontsize=16)
plt.setp(plt.xticks()[1], rotation=0, fontsize = 14)
if height != 0:
plt.ylim((0.000,height))
## Saving the plot to disk.
if not os.path.exists(outfolder):
os.makedirs(outfolder)
## Format the topic information for display
topicsLabel = str(topic)
figure_filename = outfolder+"all_"+topicsLabel+".png"
plt.savefig(figure_filename, dpi=dpi)
plt.close()
def simpleProgression(averageDataset, firstWordsFile, outfolder,
numOfTopics,
fontscale, dpi, height, mode, topics):
"""Function to plot topic development over textual progression."""
print("Launched textualProgression.")
if mode == "selected" or mode == "sel":
entriesShown = numOfTopics
dataToPlot = get_selSimpleProgression_dataToPlot(averageDataset,
firstWordsFile,
entriesShown,
topics)
create_selSimpleProgression_lineplot(dataToPlot, outfolder,
fontscale, topics,
dpi, height)
elif mode == "all":
entriesShown = numOfTopics
topics = list(range(0, numOfTopics))
for topic in topics:
topic = str(topic)
dataToPlot = get_allSimpleProgression_dataToPlot(averageDataset,
firstWordsFile,
entriesShown,
topic)
create_allSimpleProgression_lineplot(dataToPlot, outfolder,
fontscale, firstWordsFile,
topic, dpi, height)
else:
print("Please select a valid value for 'mode'.")
print("Done.")
##################################################################
### OTHER / OBSOLETE / DEV ###
##################################################################
###########################
## complex progression ### IN DEVELOPMENT
###########################
def get_selComplexProgression_dataToPlot(averageDataset, firstWordsFile,
entriesShown, topics):
"""Function to build a dataframe with all data necessary for plotting."""
print("- getting data to plot...")
with open(averageDataset, "r") as infile:
        allScores = pd.read_csv(infile, sep=",", index_col=0)
allScores = allScores.T
#print(allScores.head())
## Select the data for selected topics
someScores = allScores.loc[topics,:]
someScores.index = someScores.index.astype(np.int64)
## Add information about the firstWords of topics
firstWords = get_progression_firstWords(firstWordsFile)
dataToPlot = pd.concat([someScores, firstWords], axis=1, join="inner")
dataToPlot = dataToPlot.set_index("topicwords")
dataToPlot = dataToPlot.T
#print(dataToPlot)
return dataToPlot
def create_selComplexProgression_lineplot(dataToPlot, outfolder, fontscale,
topics, dpi, height):
"""This function does the actual plotting and saving to disk."""
print("- creating the plot...")
## Plot the selected data
dataToPlot.plot(kind="line", lw=3, marker="o")
plt.title("Entwicklung ausgewählter Topics über den Textverlauf", fontsize=20)
plt.ylabel("Topic scores (absolut)", fontsize=16)
plt.xlabel("Textabschnitte", fontsize=16)
plt.setp(plt.xticks()[1], rotation=0, fontsize = 14)
if height != 0:
plt.ylim((0.000,height))
## Saving the plot to disk.
if not os.path.exists(outfolder):
os.makedirs(outfolder)
## Format the topic information for display
topicsLabel = "-".join(str(topic) for topic in topics)
figure_filename = outfolder+"sel_"+topicsLabel+".png"
plt.savefig(figure_filename, dpi=dpi)
plt.close()
def get_allComplexProgression_dataToPlot(averageDataset, firstWordsFile,
entriesShown, topic, targetCategories):
"""Function to build a dataframe with all data necessary for plotting."""
print("- getting data to plot...")
with open(averageDataset, "r") as infile:
        allScores = pd.read_csv(infile, sep=",", index_col=None)
#print(allScores)
## Select the data for current topics
target1 = targetCategories[0]
target2 = targetCategories[1]
target1data = allScores.loc[:,target1]
target2data = allScores.loc[:,target2]
topicScores = allScores.loc[:,topic]
#print(target1data)
#print(target2data)
#print(topicScores)
dataToPlot = pd.concat([target1data, target2data], axis=1)
dataToPlot = pd.concat([dataToPlot, topicScores], axis=1)
#print(dataToPlot)
return dataToPlot
# TODO: Make sure this is only read once and then select when plotting.
def create_allComplexProgression_lineplot(dataToPlot, targetCategories,
outfolder, fontscale,
firstWordsFile, topic, dpi, height):
"""This function does the actual plotting and saving to disk."""
print("- creating the plot for topic " + topic)
## Get the first words info for the topic
firstWords = get_progression_firstWords(firstWordsFile)
topicFirstWords = firstWords.iloc[int(topic),0]
#print(topicFirstWords)
## Split plotting data into parts (for target1)
target1data = dataToPlot.iloc[:,0]
#print(target1data)
numPartialData = len(set(target1data))
## Initialize plot for several lines
completeData = []
#print(dataToPlot)
for target in set(target1data):
#print(" - plotting "+target)
partialData = dataToPlot.groupby(targetCategories[0])
partialData = partialData.get_group(target)
partialData.rename(columns={topic:target}, inplace=True)
partialData = partialData.iloc[:,2:3]
completeData.append(partialData)
#print(completeData)
## Plot the selected data, one after the other
plt.figure()
plt.figure(figsize=(15,10))
for i in range(0, numPartialData):
#print(completeData[i])
label = completeData[i].columns.values.tolist()
label = str(label[0])
plt.plot(completeData[i], lw=4, marker="o", label=label)
plt.legend()
plt.title("Entwicklung über den Textverlauf für "+topicFirstWords, fontsize=20)
plt.ylabel("Topic scores (absolut)", fontsize=16)
plt.xlabel("Textabschnitte", fontsize=16)
plt.legend()
plt.locator_params(axis = 'x', nbins = 10)
plt.setp(plt.xticks()[1], rotation=0, fontsize = 14)
if height != 0:
plt.ylim((0.000,height))
## Saving the plot to disk.
if not os.path.exists(outfolder):
os.makedirs(outfolder)
## Format the topic information for display
topicsLabel = str(topic)
figure_filename = outfolder+"all_"+str(targetCategories[0])+"-"+topicsLabel+".png"
plt.savefig(figure_filename, dpi=dpi)
plt.close()
def complexProgression(averageDataset,
firstWordsFile,
outfolder,
numOfTopics,
targetCategories,
fontscale,
dpi, height,
mode, topics):
"""Function to plot topic development over textual progression."""
print("Launched complexProgression.")
if mode == "sel":
entriesShown = numOfTopics
dataToPlot = get_selSimpleProgression_dataToPlot(averageDataset,
firstWordsFile,
entriesShown,
topics)
create_selSimpleProgression_lineplot(dataToPlot,
outfolder,
fontscale,
topics,
dpi, height)
elif mode == "all":
entriesShown = numOfTopics
topics = list(range(0, numOfTopics))
for topic in topics:
topic = str(topic)
dataToPlot = get_allComplexProgression_dataToPlot(averageDataset,
firstWordsFile,
entriesShown,
topic,
targetCategories)
create_allComplexProgression_lineplot(dataToPlot, targetCategories,
outfolder,
fontscale, firstWordsFile,
topic, dpi, height)
else:
print("Please select a valid value for 'mode'.")
print("Done.")
###########################
## show_segment ###
###########################
import shutil
def show_segment(wdir,segmentID, outfolder):
if not os.path.exists(outfolder):
os.makedirs(outfolder)
shutil.copyfile(wdir+"2_segs/"+segmentID+".txt",outfolder+segmentID+".txt")
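## Illustrative usage sketch (added for clarity, not part of the original
## workflow): wdir and segmentID are hypothetical; the function simply copies
## <wdir>2_segs/<segmentID>.txt into outfolder.
#show_segment(wdir="/home/user/project/",
#             segmentID="text0001-000005",
#             outfolder="9_segments/")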
###########################
## itemPCA ### IN DEVELOPMENT
###########################
from sklearn.decomposition import PCA
#def build_itemScoreMatrix(averageDatasets, targetCategory,
# topicsPerItem, sortingCriterium):
# """Reads Mallet output (topics with words and word weights) into dataframe."""
# print("- building item score matrix...")
# for averageFile in glob.glob(averageDatasets):
# if targetCategory in averageFile:
# itemScores = pd.read_table(averageFile, header=0, index_col=0, sep=",")
# itemScores = itemScores.T
# if sortingCriterium == "std":
# itemScores["sorting"] = itemScores.std(axis=1)
# elif sortingCriterium == "mean":
# itemScores["sorting"] = itemScores.mean(axis=1)
# itemScores = itemScores.sort(columns=["sorting"], axis=0, ascending=False)
# itemScoreMatrix = itemScores.iloc[0:topicsPerItem,0:-1]
# itemScoreMatrix = itemScoreMatrix.T
# #print(itemScoreMatrix)
# return itemScoreMatrix
def perform_itemPCA(itemScoreMatrix, targetCategory, topicsPerItem,
sortingCriterium, figsize, outfolder):
print("- doing the PCA...")
itemScoreMatrix = itemScoreMatrix.T
targetDimensions = 2
pca = PCA(n_components=targetDimensions)
pca = pca.fit(itemScoreMatrix)
pca = pca.transform(itemScoreMatrix)
# plt.scatter(pca[0,0:20], pca[1,0:20])
    ## Scatter the data points in the space of the first two principal components.
    plt.figure(figsize=figsize)
    plt.scatter(pca[:, 0], pca[:, 1])
    plt.title("Item PCA: " + targetCategory, fontsize=20)
    ## Save the image file.
    if not os.path.exists(outfolder):
        os.makedirs(outfolder)
    figure_filename = "item-pca_" + targetCategory + "_" + sortingCriterium + "-" + str(topicsPerItem) + "topics.png"
    plt.savefig(outfolder + figure_filename, dpi=300)
    plt.close()
def itemPCA(averageDatasets, targetCategories,
topicsPerItem, sortingCriterium, figsize, outfolder):
"""Function to perform PCA on per-item topic scores and plot the result."""
print("Launched itemPCA.")
for targetCategory in targetCategories:
## Load topic scores per item and turn into score matrix
## (Using the function from itemClustering above!)
itemScoreMatrix = build_itemScoreMatrix(averageDatasets, targetCategory,
topicsPerItem, sortingCriterium)
        ## Do PCA on the dataframe
perform_itemPCA(itemScoreMatrix, targetCategory, topicsPerItem, sortingCriterium, figsize, outfolder)
print("Done.")
| mit |
ian-r-rose/SHTOOLS | examples/python/TestLegendre/TestLegendre.py | 2 | 4707 | #!/usr/bin/env python
"""
This script tests and plots all Geodesy normalized Legendre functions.
Parameters can be changed in the main function.
"""
# standard imports:
import os
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# import shtools:
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
import pyshtools as shtools
# set shtools plot style:
sys.path.append(os.path.join(os.path.dirname(__file__), "../Common"))
from FigStyle import style_shtools
mpl.rcParams.update(style_shtools)
#==== MAIN FUNCTION ====
def main():
#--- input parameters (change here) ---
normalization = '' # normalization should be one of ['Bar','Schmidt','ON','']
lmax = 40 # maximum degree
mplot = min(lmax, 10) # maximum plotting order (all degrees are plotted)
#--- run tests ---
test_associatedlegendre(lmax, mplot, normalization)
test_legendre(lmax, normalization)
#==== TEST LEGENDRE FUNCTIONS ====
def test_legendre(lmax, normalization):
    print('testing Pl{0} and Pl{0}_d1...'.format(normalization))
#--- import function from shtools ---
if normalization == '':
Pl = shtools.PLegendre
Pl_d1 = shtools.PLegendre_d1
else:
Pl = getattr(shtools, 'Pl' + normalization)
Pl_d1 = getattr(shtools, 'Pl' + normalization + '_d1')
#--- derived parameters ---
npoints = 5 * lmax
ls = np.arange(lmax)
cost = np.cos(np.linspace(np.pi / npoints, np.pi - np.pi / npoints, npoints))
#--- create arrays to store Legendre functions of degrees l and orders m at all points cost ---
Pl1 = np.zeros((npoints, lmax))
Pl2 = np.zeros((npoints, lmax))
dPl2 = np.zeros((npoints, lmax))
for iz, z in enumerate(cost):
Pl1_buf = Pl(lmax, z)
Pl2_buf, dPl2_buf = Pl_d1(lmax, z)
for l in ls:
Pl1[iz, l] = Pl1_buf[l]
Pl2[iz, l] = Pl2_buf[l]
dPl2[iz, l] = dPl2_buf[l]
#---- check if both subroutines computed the same Legendre functions ---
if not np.allclose(Pl1, Pl2, rtol=1e-10):
        raise Exception('Legendre functions from Pl and Pl_d1 are different (rtol>1e-10)')
#---- plot the legendre functions and derivatives up to maximum order mplot ---
fig, ax = plt.subplots(1, 2, sharey=True, figsize=(15, 6))
fig.suptitle('orthonormalized Legendre functions (col1) and derivatives (col2)')
ax[0].imshow(Pl1[:, :], extent=(0., lmax, 0., np.pi), aspect='auto')
ax[1].imshow(dPl2[:, :], extent=(0., lmax, 0., np.pi), aspect='auto')
ax[1].set_xlabel('l')
fig.savefig('legendre.png')
#==== TEST ASSOCIATED LEGENDRE FUNCTIONS ====
def test_associatedlegendre(lmax, mplot, normalization):
    print('testing Plm{0} and Plm{0}_d1...'.format(normalization))
#--- import function from shtools ---
if normalization == '':
Plm = shtools.PLegendreA
Plm_d1 = shtools.PLegendreA_d1
else:
Plm = getattr(shtools, 'Plm' + normalization)
Plm_d1 = getattr(shtools, 'Plm' + normalization + '_d1')
#--- derived parameters ---
npoints = 5 * lmax
ls = np.arange(lmax)
cost = np.cos(np.linspace(np.pi / npoints, np.pi - np.pi / npoints, npoints))
#--- create arrays to store Legendre functions of degrees l and orders m at all points cost ---
Plm1 = np.zeros((npoints, lmax, lmax))
Plm2 = np.zeros((npoints, lmax, lmax))
dPlm2 = np.zeros((npoints, lmax, lmax))
for iz, z in enumerate(cost):
Plm1_buf = Plm(lmax, z)
Plm2_buf, dPlm2_buf = Plm_d1(lmax, z)
for l in ls:
for m in np.arange(l):
ind = shtools.PlmIndex(l, m) - 1 # Fortran indexing
Plm1[iz, l, m] = Plm1_buf[ind]
Plm2[iz, l, m] = Plm2_buf[ind]
dPlm2[iz, l, m] = dPlm2_buf[ind]
#---- check if both subroutines computed the same Legendre functions ---
    if not np.allclose(Plm1, Plm2, rtol=1e-10):
        raise Exception('Legendre functions from Plm and Plm_d1 are different (rtol>1e-10)')
#---- plot the legendre functions and derivatives up to maximum order mplot ---
fig, ax = plt.subplots(2, mplot, sharey=True, sharex=True, figsize=(15, 6))
fig.suptitle('orthonormalized associated Legendre functions (row1) and derivatives (row2)')
for m in range(mplot):
ax[0, m].imshow(Plm1[:, :, m], extent=(0., lmax, 0., np.pi), aspect='auto')
ax[0, m].set_title('m=%d' % m)
ax[1, m].imshow(dPlm2[:, :, m], extent=(0., lmax, 0., np.pi), aspect='auto')
ax[1, m].set_xlabel('l')
fig.savefig('associatedlegendre.png')
#==== EXECUTE SCRIPT ====
if __name__ == "__main__":
main()
| bsd-3-clause |
OLAPLINE/TM1py | Tests/Utils.py | 1 | 41892 | import configparser
import json
from pathlib import Path
import unittest
import uuid
import pandas as pd
from TM1py import Subset
from TM1py.Objects import Process, Dimension, Hierarchy, Cube
from TM1py.Services import TM1Service
from TM1py.Utils import TIObfuscator
from TM1py.Utils import Utils, MDXUtils
from TM1py.Utils.MDXUtils import DimensionSelection, read_dimension_composition_from_mdx, \
read_dimension_composition_from_mdx_set_or_tuple, read_dimension_composition_from_mdx_set, \
read_dimension_composition_from_mdx_tuple, split_mdx, _find_case_and_space_insensitive_first_occurrence
from TM1py.Utils.Utils import dimension_hierarchy_element_tuple_from_unique_name
config = configparser.ConfigParser()
config.read(Path(__file__).parent.joinpath('config.ini'))
PREFIX = "TM1py_Tests_Utils_"
MDX_TEMPLATE = """
SELECT
{rows} ON ROWS,
{columns} ON COLUMNS
FROM {cube}
WHERE {where}
"""
MDX_TEMPLATE_SHORT = """
SELECT
{rows} ON ROWS,
{columns} ON COLUMNS
FROM {cube}
"""
class TestMDXUtils(unittest.TestCase):
tm1 = None
@classmethod
def setUpClass(cls):
# Connect to TM1
cls.tm1 = TM1Service(**config['tm1srv01'])
# Build 4 Dimensions
cls.dim1_name = PREFIX + "Dimension1"
cls.dim1_element_names = ["A " + str(i) for i in range(10)]
cls.dim1 = Dimension(cls.dim1_name)
h = Hierarchy(cls.dim1_name, cls.dim1_name)
for element_name in cls.dim1_element_names:
h.add_element(element_name, 'Numeric')
cls.dim1.add_hierarchy(h)
cls.dim2_name = PREFIX + "Dimension2"
cls.dim2_element_names = ["B " + str(i) for i in range(10)]
cls.dim2 = Dimension(cls.dim2_name)
h = Hierarchy(cls.dim2_name, cls.dim2_name)
for element_name in cls.dim2_element_names:
h.add_element(element_name, 'Numeric')
cls.dim2.add_hierarchy(h)
cls.dim3_name = PREFIX + "Dimension3"
cls.dim3_element_names = ["C " + str(i) for i in range(10)]
cls.dim3 = Dimension(cls.dim3_name)
h = Hierarchy(cls.dim3_name, cls.dim3_name)
for element_name in cls.dim3_element_names:
h.add_element(element_name, 'Numeric')
cls.dim3.add_hierarchy(h)
cls.dim4_name = PREFIX + "Dimension4"
cls.dim4_element_names = ["D " + str(i) for i in range(10)]
cls.dim4 = Dimension(cls.dim4_name)
h = Hierarchy(cls.dim4_name, cls.dim4_name)
for element_name in cls.dim4_element_names:
h.add_element(element_name, 'Numeric')
cls.dim4.add_hierarchy(h)
# Define cube with 4 dimensions
cls.cube_name = PREFIX + "Cube"
cls.cube = Cube(
name=cls.cube_name,
dimensions=[cls.dim1_name, cls.dim2_name, cls.dim3_name, cls.dim4_name])
def setUp(self):
if self.tm1.cubes.exists(self.cube_name):
self.tm1.cubes.delete(self.cube_name)
for dimension in (self.dim1, self.dim2, self.dim3, self.dim4):
if self.tm1.dimensions.exists(dimension.name):
self.tm1.dimensions.delete(dimension.name)
self.tm1.dimensions.create(dimension)
self.tm1.cubes.create(self.cube)
# Build Subset
self.dim4_subset_Name = PREFIX + "Subset"
self.tm1.dimensions.subsets.create(Subset(
subset_name=self.dim4_subset_Name,
dimension_name=self.dim4_name,
hierarchy_name=self.dim4_name,
expression="HEAD([{}].Members, 1)".format(self.dim4_name)))
def tearDown(self):
self.tm1.cubes.delete(self.cube_name)
self.tm1.dimensions.delete(self.dim1_name)
self.tm1.dimensions.delete(self.dim2_name)
self.tm1.dimensions.delete(self.dim3_name)
self.tm1.dimensions.delete(self.dim4_name)
def test_construct_mdx(self):
rows = [DimensionSelection(dimension_name=self.dim1_name),
DimensionSelection(dimension_name=self.dim2_name, elements=self.dim2_element_names)]
columns = [DimensionSelection(
dimension_name=self.dim3_name,
expression="TM1SubsetAll([{}])".format(self.dim3_name))]
contexts = {self.dim4_name: self.dim4_element_names[0]}
mdx = MDXUtils.construct_mdx(
cube_name=self.cube_name,
rows=rows,
columns=columns,
contexts=contexts,
suppress=None)
content = self.tm1.cubes.cells.execute_mdx(mdx)
number_cells = len(content.keys())
self.assertEqual(number_cells, 1000)
def test_construct_mdx_no_titles(self):
rows = [DimensionSelection(dimension_name=self.dim1_name),
DimensionSelection(dimension_name=self.dim2_name, elements=self.dim2_element_names)]
columns = [
DimensionSelection(
dimension_name=self.dim3_name,
expression="TM1SubsetAll([{}])".format(self.dim3_name)),
DimensionSelection(
dimension_name=self.dim4_name,
subset=self.dim4_subset_Name)]
mdx = MDXUtils.construct_mdx(
cube_name=self.cube_name,
rows=rows,
columns=columns,
suppress=None)
content = self.tm1.cubes.cells.execute_mdx(mdx)
number_cells = len(content.keys())
self.assertEqual(number_cells, 1000)
def test_construct_mdx_suppress_zeroes(self):
rows = [DimensionSelection(dimension_name=self.dim1_name),
DimensionSelection(dimension_name=self.dim2_name, elements=self.dim2_element_names)]
columns = [
DimensionSelection(
dimension_name=self.dim3_name,
expression="TM1SubsetAll([{}])".format(self.dim3_name)),
DimensionSelection(
dimension_name=self.dim4_name,
subset=self.dim4_subset_Name)]
mdx = MDXUtils.construct_mdx(
cube_name=self.cube_name,
rows=rows,
columns=columns,
suppress="BOTH")
content = self.tm1.cubes.cells.execute_mdx(mdx)
number_cells = len(content.keys())
self.assertLess(number_cells, 1000)
def test_determine_selection_type(self):
self.assertEqual(
DimensionSelection.determine_selection_type(elements=["e1", "e2"], subset=None, expression=None),
DimensionSelection.ITERABLE)
self.assertEqual(
DimensionSelection.determine_selection_type(["e1", "e2"]),
DimensionSelection.ITERABLE)
self.assertEqual(
DimensionSelection.determine_selection_type(elements=None, subset="something", expression=None),
DimensionSelection.SUBSET)
self.assertEqual(
DimensionSelection.determine_selection_type(None, "something", None),
DimensionSelection.SUBSET)
self.assertEqual(
DimensionSelection.determine_selection_type(elements=None, subset=None, expression="{[d1].[e1]}"),
DimensionSelection.EXPRESSION)
self.assertEqual(
DimensionSelection.determine_selection_type(None, None, "{[d1].[e1]}"),
DimensionSelection.EXPRESSION)
self.assertEqual(
DimensionSelection.determine_selection_type(elements=None, subset=None, expression=None),
None)
self.assertEqual(
DimensionSelection.determine_selection_type(None, None, None),
None)
self.assertEqual(
DimensionSelection.determine_selection_type(),
None)
self.assertRaises(
ValueError,
DimensionSelection.determine_selection_type, ["e2"], "subset1", "{[d1].[e1]}")
self.assertRaises(
ValueError,
DimensionSelection.determine_selection_type, ["e2"], "subset1")
self.assertRaises(
ValueError,
DimensionSelection.determine_selection_type, ["e2"], None, "subset1")
def test_curly_braces(self):
self.assertEqual(
MDXUtils.curly_braces("something"),
"{something}")
self.assertEqual(
MDXUtils.curly_braces("something}"),
"{something}")
self.assertEqual(
MDXUtils.curly_braces("{something"),
"{something}")
self.assertEqual(
MDXUtils.curly_braces("{something}"),
"{something}")
def test_build_element_unique_names_without_hierarchies(self):
dimension_names = ["dim1", "dim1"]
element_names = ["elem1", "elem2"]
gen = Utils.build_element_unique_names(dimension_names=dimension_names, element_names=element_names)
element_unique_names = list(gen)
self.assertEqual(len(element_unique_names), 2)
self.assertTrue("[dim1].[elem1]" in element_unique_names)
self.assertTrue("[dim1].[elem2]" in element_unique_names)
def test_build_element_unique_names_with_hierarchies(self):
dimension_names = ["dim1", "dim1", "dim1"]
hierarchy_names = ["hier1", "hier2", "hier3"]
element_names = ["elem1", "elem2", "elem3"]
gen = Utils.build_element_unique_names(
dimension_names=dimension_names, hierarchy_names=hierarchy_names, element_names=element_names)
element_unique_names = list(gen)
self.assertEqual(len(element_unique_names), 3)
self.assertTrue("[dim1].[hier1].[elem1]" in element_unique_names)
self.assertTrue("[dim1].[hier2].[elem2]" in element_unique_names)
self.assertTrue("[dim1].[hier3].[elem3]" in element_unique_names)
def test_build_pandas_multiindex_dataframe_from_cellset(self):
rows = [DimensionSelection(dimension_name=self.dim1_name),
DimensionSelection(dimension_name=self.dim2_name, elements=self.dim2_element_names)]
columns = [
DimensionSelection(
dimension_name=self.dim3_name,
expression="TM1SubsetAll([{}])".format(self.dim3_name)),
DimensionSelection(
dimension_name=self.dim4_name,
subset=self.dim4_subset_Name)]
suppress = None
mdx = MDXUtils.construct_mdx(
cube_name=self.cube_name,
rows=rows,
columns=columns,
suppress=suppress)
cellset = self.tm1.cubes.cells.execute_mdx(mdx)
df = Utils.build_pandas_dataframe_from_cellset(cellset, multiindex=True)
self.assertIsInstance(df, pd.DataFrame)
self.assertTrue(df.shape[0] == 1000)
self.assertTrue(df.shape[1] == 1)
cellset = Utils.build_cellset_from_pandas_dataframe(df)
self.assertTrue(len(cellset.keys()) == 1000)
self.assertIsInstance(cellset, Utils.CaseAndSpaceInsensitiveTuplesDict)
def test_build_pandas_dataframe_from_cellset(self):
rows = [DimensionSelection(dimension_name=self.dim1_name),
DimensionSelection(dimension_name=self.dim2_name, elements=self.dim2_element_names)]
columns = [
DimensionSelection(
dimension_name=self.dim3_name,
expression="TM1SubsetAll([{}])".format(self.dim3_name)),
DimensionSelection(
dimension_name=self.dim4_name,
subset=self.dim4_subset_Name)]
suppress = None
mdx = MDXUtils.construct_mdx(
cube_name=self.cube_name,
rows=rows,
columns=columns,
suppress=suppress)
cellset = self.tm1.cubes.cells.execute_mdx(mdx)
df = Utils.build_pandas_dataframe_from_cellset(cellset, multiindex=False)
self.assertTrue(df.shape[0] == 1000)
# cater for potential Sandboxes dimension on first position
if df.columns[0] == "Sandboxes":
self.assertTrue(df.shape[1] == 6)
else:
self.assertTrue(df.shape[1] == 5)
self.assertIsInstance(df, pd.DataFrame)
cellset = Utils.build_cellset_from_pandas_dataframe(df)
self.assertTrue(len(cellset.keys()) == 1000)
self.assertIsInstance(cellset, Utils.CaseAndSpaceInsensitiveTuplesDict)
def test_build_pandas_dataframe_empty_cellset(self):
self.tm1.cubes.cells.write_value(
value=0,
cube_name=self.cube_name,
element_tuple=(self.dim1_element_names[0], self.dim2_element_names[0],
self.dim3_element_names[0], self.dim4_element_names[0]),
dimensions=(self.dim1_name, self.dim2_name, self.dim3_name, self.dim4_name))
rows = [DimensionSelection(dimension_name=self.dim1_name, elements=(self.dim1_element_names[0],)),
DimensionSelection(dimension_name=self.dim2_name, elements=(self.dim2_element_names[0],))]
columns = [DimensionSelection(dimension_name=self.dim3_name, elements=(self.dim3_element_names[0],)),
DimensionSelection(dimension_name=self.dim4_name, elements=(self.dim4_element_names[0],))]
suppress = "Both"
mdx = MDXUtils.construct_mdx(
cube_name=self.cube_name,
rows=rows,
columns=columns,
suppress=suppress)
empty_cellset = self.tm1.cubes.cells.execute_mdx(mdx)
self.assertRaises(ValueError, Utils.build_pandas_dataframe_from_cellset, empty_cellset, True)
self.assertRaises(ValueError, Utils.build_pandas_dataframe_from_cellset, empty_cellset, False)
@unittest.skip("Not deterministic. Needs improvement.")
def test_read_cube_name_from_mdx(self):
all_cube_names = self.tm1.cubes.get_all_names()
for cube_name in all_cube_names:
private_views, public_views = self.tm1.cubes.views.get_all(cube_name)
for view in private_views + public_views:
mdx = view.MDX
                self.assertEqual(
cube_name.upper().replace(" ", ""),
MDXUtils.read_cube_name_from_mdx(mdx))
def test_dimension_hierarchy_element_tuple_from_unique_name(self):
unique_element_name = "[d1].[e1]"
dimension, hierarchy, element = dimension_hierarchy_element_tuple_from_unique_name(unique_element_name)
self.assertEqual(dimension, "d1")
self.assertEqual(hierarchy, "d1")
self.assertEqual(element, "e1")
unique_element_name = "[d1].[d1].[e1]"
dimension, hierarchy, element = dimension_hierarchy_element_tuple_from_unique_name(unique_element_name)
self.assertEqual(dimension, "d1")
self.assertEqual(hierarchy, "d1")
self.assertEqual(element, "e1")
unique_element_name = "[d1].[leaves].[e1]"
dimension, hierarchy, element = dimension_hierarchy_element_tuple_from_unique_name(unique_element_name)
self.assertEqual(dimension, "d1")
self.assertEqual(hierarchy, "leaves")
self.assertEqual(element, "e1")
def test_read_dimension_composition_from_mdx_simple1(self):
mdx = MDX_TEMPLATE.format(
rows="{{ [{}].MEMBERS }} * {{ [{}].MEMBERS }}".format(self.dim1_name, self.dim2_name),
columns="{{ [{}].MEMBERS }}".format(self.dim3_name),
cube="[{}]".format(self.cube_name),
where="([{}].[{}])".format(self.dim4_name, self.dim4_element_names[0])
)
cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
self.assertEqual(cube, self.cube_name)
self.assertEqual(rows, [self.dim1_name, self.dim2_name])
self.assertEqual(columns, [self.dim3_name])
self.assertEqual(titles, [self.dim4_name])
def test_read_dimension_composition_from_mdx_simple2(self):
mdx = MDX_TEMPLATE.format(
rows="{{ [{}].MEMBERS }}".format(self.dim3_name),
columns="{{ [{}].MEMBERS }} * {{ [{}].MEMBERS }}".format(self.dim1_name, self.dim2_name),
cube="[{}]".format(self.cube_name),
where="( [{}].[{}] )".format(self.dim4_name, self.dim4_element_names[0])
)
cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
self.assertEqual(cube, self.cube_name)
self.assertEqual(rows, [self.dim3_name])
self.assertEqual(columns, [self.dim1_name, self.dim2_name])
self.assertEqual(titles, [self.dim4_name])
def test_read_dimension_composition_from_mdx_simple3(self):
mdx = MDX_TEMPLATE.format(
rows="{[" + self.dim3_name + "].MEMBERS}",
columns="{[" + self.dim1_name + "].MEMBERS}",
cube="[{}]".format(self.cube_name),
where="([{}].[{}], [{}].[{}])".format(self.dim4_name, self.dim4_element_names[0], self.dim2_name,
self.dim2_element_names[0])
)
cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
self.assertEqual(cube, self.cube_name)
self.assertEqual(rows, [self.dim3_name])
self.assertEqual(columns, [self.dim1_name])
self.assertEqual(titles, [self.dim4_name, self.dim2_name])
def test_read_dimension_composition_from_mdx_without_titles(self):
mdx = MDX_TEMPLATE_SHORT.format(
rows="{[" + self.dim1_name + "].MEMBERS} * {[" + self.dim2_name + "].MEMBERS}",
columns="{[" + self.dim3_name + "].MEMBERS} * {[" + self.dim4_name + "].MEMBERS}",
cube="[{}]".format(self.cube_name)
)
cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
self.assertEqual(cube, self.cube_name)
self.assertEqual(rows, [self.dim1_name, self.dim2_name])
self.assertEqual(columns, [self.dim3_name, self.dim4_name])
def test_read_dimension_composition_from_mdx_asynchronous_single(self):
mdx = MDX_TEMPLATE.format(
rows="{([" + self.dim1_name + "].[" + self.dim1_element_names[0] + "], [" + self.dim2_name + "].[" +
self.dim2_element_names[0] + "])}",
columns="{[" + self.dim3_name + "].MEMBERS}",
cube="[{}]".format(self.cube_name),
where="([" + self.dim4_name + "].[" + self.dim4_element_names[0] + "])"
)
cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
self.assertEqual(cube, self.cube_name)
self.assertEqual(rows, [self.dim1_name, self.dim2_name])
self.assertEqual(columns, [self.dim3_name])
self.assertEqual(titles, [self.dim4_name])
def test_read_dimension_composition_from_mdx_asynchronous_multi(self):
mdx = MDX_TEMPLATE_SHORT.format(
rows="{([" + self.dim1_name + "].[" + self.dim1_element_names[0] + "], [" + self.dim2_name + "].[" +
self.dim2_element_names[0] + "]),([" + self.dim1_name + "].[" + self.dim1_element_names[
1] + "], [" + self.dim2_name + "].[" +
self.dim2_element_names[1] + "]) }",
columns="{([" + self.dim3_name + "].[" + self.dim3_element_names[0] + "], [" + self.dim4_name + "].[" +
self.dim4_element_names[0] + "]),([" + self.dim3_name + "].[" + self.dim3_element_names[
1] + "], [" + self.dim4_name + "].[" +
self.dim4_element_names[1] + "]) }",
cube="[{}]".format(self.cube_name)
)
cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
self.assertEqual(cube, self.cube_name)
self.assertEqual(rows, [self.dim1_name, self.dim2_name])
self.assertEqual(columns, [self.dim3_name, self.dim4_name])
self.assertEqual(titles, [])
def test_read_dimension_composition_from_mdx_set_or_tuple(self):
mdx_set = "{[dim1].[element1]} * {[dim2].[element2]}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_set)
self.assertEqual(dimensions, ["dim1", "dim2"])
mdx_set = "{[dim1].[element1], [dim1].[element2]}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_set)
self.assertEqual(dimensions, ["dim1"])
mdx_set = "{[dim1].Members}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_set)
self.assertEqual(dimensions, ["dim1"])
mdx_set = "{Tm1SubsetAll([dim1])}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_set)
self.assertEqual(dimensions, ["dim1"])
mdx_tuple = "{([dim1].[element1], [dim2].[element2])}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_tuple)
self.assertEqual(dimensions, ["dim1", "dim2"])
mdx_tuple = "{([dim1].[element1])}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_tuple)
self.assertEqual(dimensions, ["dim1"])
mdx_tuple = "{([dim1].[element1], [dim2].[element2]), ([dim1].[element8], [dim2].[element5])}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_tuple)
self.assertEqual(dimensions, ["dim1", "dim2"])
def test_read_dimension_composition_from_mdx_set(self):
mdx_set = "{[dim1].[element1]} * {[dim2].[element2]}"
dimensions = read_dimension_composition_from_mdx_set(mdx_set)
self.assertEqual(dimensions, ["dim1", "dim2"])
mdx_set = "{[dim1].[element1], [dim1].[element2]}"
dimensions = read_dimension_composition_from_mdx_set(mdx_set)
self.assertEqual(dimensions, ["dim1"])
mdx_set = "{[dim1].Members}"
dimensions = read_dimension_composition_from_mdx_set(mdx_set)
self.assertEqual(dimensions, ["dim1"])
mdx_set = "{Tm1SubsetAll([dim1])}"
dimensions = read_dimension_composition_from_mdx_set(mdx_set)
self.assertEqual(dimensions, ["dim1"])
def test_read_dimension_composition_from_mdx_tuple(self):
mdx_tuple = "{([dim1].[element1], [dim2].[element2])}"
dimensions = read_dimension_composition_from_mdx_tuple(mdx_tuple)
self.assertEqual(dimensions, ["dim1", "dim2"])
mdx_tuple = "{([dim1].[element1])}"
dimensions = read_dimension_composition_from_mdx_tuple(mdx_tuple)
self.assertEqual(dimensions, ["dim1"])
mdx_tuple = "{([dim1].[element1], [dim2].[element2]), ([dim1].[element8], [dim2].[element5])}"
dimensions = read_dimension_composition_from_mdx_tuple(mdx_tuple)
self.assertEqual(dimensions, ["dim1", "dim2"])
def test_split_mdx_sets(self):
rows = "{{ [{dim1}].[elem1] , [{dim2}].[{elem2}] }}".format(
dim1=self.dim1_name,
elem1=self.dim1_element_names[0],
dim2=self.dim2_name,
elem2=self.dim2_element_names[0]
)
columns = "{{ [{}].MEMBERS }}".format(self.dim3_name)
cube = "[{}]".format(self.cube_name)
where = "([{}].[{}])".format(self.dim4_name, self.dim4_element_names[0])
mdx = MDX_TEMPLATE.format(
rows=rows,
columns=columns,
cube=cube,
where=where
)
mdx_rows, mdx_columns, mdx_from, mdx_where = split_mdx(mdx)
self.assertEqual(rows.replace(" ", ""), mdx_rows)
self.assertEqual(columns.replace(" ", ""), mdx_columns)
self.assertEqual(cube.replace(" ", ""), mdx_from)
self.assertEqual(where.replace(" ", ""), mdx_where)
def test_split_mdx_tuples_without_where(self):
rows = "{{ ( [{dim1}].[{elem1}], [{dim2}].[{elem2}] ) , ( [{dim1}].[{elem3}]. [{dim2}].[{elem4}] ) }}".format(
dim1=self.dim1_name,
elem1=self.dim1_element_names[0],
dim2=self.dim2_name,
elem2=self.dim2_element_names[0],
elem3=self.dim2_element_names[1],
elem4=self.dim2_element_names[1]
)
columns = "{{([{dim3}].[{elem1}], [{dim4}].[{elem2}])}}".format(
dim3=self.dim3_name,
elem1=self.dim3_element_names[0],
dim4=self.dim4_name,
elem2=self.dim4_element_names[0]
)
cube = "[{}]".format(self.cube_name)
mdx = MDX_TEMPLATE_SHORT.format(
rows=rows,
columns=columns,
cube=cube
)
mdx_rows, mdx_columns, mdx_from, mdx_where = split_mdx(mdx)
self.assertEqual(rows.replace(" ", ""), mdx_rows)
self.assertEqual(columns.replace(" ", ""), mdx_columns)
self.assertEqual(cube.replace(" ", ""), mdx_from)
def test_split_mdx_tuples_with_where(self):
rows = "{{ ( [{dim1}].[{elem1}], [{dim2}].[{elem2}] ) , ( [{dim1}].[{elem3}]. [{dim2}].[{elem4}] ) }}".format(
dim1=self.dim1_name,
elem1=self.dim1_element_names[0],
dim2=self.dim2_name,
elem2=self.dim2_element_names[0],
elem3=self.dim2_element_names[1],
elem4=self.dim2_element_names[1]
)
columns = "{{ ( [{dim3}].[{elem1}] ) }}".format(
dim3=self.dim3_name,
elem1=self.dim3_element_names[0]
)
cube = "[{}]".format(self.cube_name)
where = "( [{dim4}].[{elem1}] )".format(
dim4=self.dim4_name,
elem1=self.dim4_element_names[0]
)
mdx = MDX_TEMPLATE.format(
rows=rows,
columns=columns,
cube=cube,
where=where
)
mdx_rows, mdx_columns, mdx_from, mdx_where = split_mdx(mdx)
self.assertEqual(rows.replace(" ", ""), mdx_rows)
self.assertEqual(columns.replace(" ", ""), mdx_columns)
self.assertEqual(cube.replace(" ", ""), mdx_from)
self.assertEqual(where.replace(" ", ""), mdx_where)
def test_split_mdx_sets_and_tuples(self):
rows = "{{ ( [{dim1}].[{elem1}], [{dim2}].[{elem2}] ) , ( [{dim1}].[{elem3}]. [{dim2}].[{elem4}] ) }}".format(
dim1=self.dim1_name,
elem1=self.dim1_element_names[0],
dim2=self.dim2_name,
elem2=self.dim2_element_names[0],
elem3=self.dim2_element_names[1],
elem4=self.dim2_element_names[1]
)
columns = "{{ Tm1SubsetAll ( [{dim3}] ) }}".format(
dim3=self.dim3_name,
elem1=self.dim3_element_names[0]
)
cube = "[{}]".format(self.cube_name)
where = "( [{dim4}].[{elem2}] )".format(
dim4=self.dim4_name,
elem2=self.dim4_element_names[0]
)
mdx = MDX_TEMPLATE.format(
rows=rows,
columns=columns,
cube=cube,
where=where
)
mdx_rows, mdx_columns, mdx_from, mdx_where = split_mdx(mdx)
self.assertEqual(rows.replace(" ", ""), mdx_rows)
self.assertEqual(columns.replace(" ", ""), mdx_columns)
self.assertEqual(cube.replace(" ", ""), mdx_from)
self.assertEqual(where.replace(" ", ""), mdx_where)
def test_find_case_and_space_insensitive_first_occurrence(self):
mdx = MDX_TEMPLATE.format(
rows="{{ [{}].MEMBERS }}".format(self.dim3_name),
columns="{{ [{}].MEMBERS }} * {{ [{}].MEMBERS }}".format(self.dim1_name, self.dim2_name),
cube="[{}]".format(self.cube_name),
where="( [{}].[{}] )".format(self.dim4_name, self.dim4_element_names[0]))
selection, rest = _find_case_and_space_insensitive_first_occurrence(
text=mdx,
pattern_start="ROWS,",
pattern_end="}ON COLUMNS")
self.assertEqual(
"ROWS,{[TM1py_Tests_Utils_Dimension1].MEMBERS}*{[TM1py_Tests_Utils_Dimension2].MEMBERS}",
selection)
self.assertEqual(
"FROM[TM1py_Tests_Utils_Cube]WHERE([TM1py_Tests_Utils_Dimension4].[D0])",
rest)
def test_extract_unique_name_from_members(self):
members = [
{'UniqueName': '[Dimension3].[Dimension3].[Element 592]',
'Element': {'UniqueName': '[Dimension3].[Dimension3].[Element 592]'}}]
self.assertEqual(
Utils.extract_unique_names_from_members(members),
["[Dimension3].[Dimension3].[Element 592]"])
members = [{'UniqueName': '[Dimension1].[Dimension1].[Element 790]',
'Element': {'UniqueName': '[Dimension1].[Dimension1].[Element 790]'}},
{'UniqueName': '[Dimension2].[Dimension2].[Element 541]',
'Element': {'UniqueName': '[Dimension2].[Dimension2].[Element 541]'}}]
self.assertEqual(
Utils.extract_unique_names_from_members(members),
["[Dimension1].[Dimension1].[Element 790]", "[Dimension2].[Dimension2].[Element 541]"])
members = [{'UniqueName': '',
'Element': {'UniqueName': '[Dimension1].[Dimension1].[Element 790]'}},
{'UniqueName': '',
'Element': {'UniqueName': '[Dimension2].[Dimension2].[Element 541]'}}]
self.assertEqual(
Utils.extract_unique_names_from_members(members),
["[Dimension1].[Dimension1].[Element 790]", "[Dimension2].[Dimension2].[Element 541]"])
members = [{'UniqueName': '[Dimension1].[Dimension1].[Element 790]',
'Element': None},
{'UniqueName': '[Dimension2].[Dimension2].[Element 541]',
'Element': None}]
self.assertEqual(
Utils.extract_unique_names_from_members(members),
["[Dimension1].[Dimension1].[Element 790]", "[Dimension2].[Dimension2].[Element 541]"])
def test_extract_axes_from_cellset(self):
with open(Path(__file__).parent.joinpath("resources", "raw_cellset.json")) as file:
raw_cellset_as_dict = json.load(file)
row_axis, column_axis, title_axis = Utils.extract_axes_from_cellset(raw_cellset_as_dict=raw_cellset_as_dict)
self.assertIn("[City].[City].[NYC]", json.dumps(row_axis))
self.assertIn("[City].[City].[Chicago]", json.dumps(row_axis))
self.assertIn("[Date].[Date].[2017-11-26]", json.dumps(column_axis))
self.assertIn("[Date].[Date].[2017-11-27]", json.dumps(column_axis))
self.assertIn("[Version].[Version].[Actual]", json.dumps(title_axis))
def test_odata_escape_single_quotes_in_object_names(self):
url = "https://localhost:8099/api/v1/Dimensions('dime'nsion')/Hierarchies('hier'archy')/Elements('elem'ent')"
url1 = "https://localhost:915/api/v1/TransactionLogEntries?$orderby=TimeStamp desc &$filter=Cube eq 'Test 'Case' cube*'"
url2 = "https://localhost:915/api/v1/TransactionLogEntries?$orderby=TimeStamp desc &$filter=Cube eq 'Test C_'ase cube'"
url3 = "https://localhost:915/api/v1/TransactionLogEntries?$orderby=TimeStamp desc &$filter=Cube eq 'Test C9'as*'&e cube'"
url4 = "https://localhost:915/api/v1/TransactionLogEntries?$orderby=TimeStamp desc &$filter=Cube eq 'Test C9'_ase cube'"
url5 = "https://localhost:915/api/v1/TransactionLogEntries?$orderby=TimeStamp desc &$filter=Cube eq 'Test Case cube'"
escaped_url = Utils.odata_escape_single_quotes_in_object_names(url)
escaped_url1 = Utils.odata_escape_single_quotes_in_object_names(url1)
escaped_url2 = Utils.odata_escape_single_quotes_in_object_names(url2)
escaped_url3 = Utils.odata_escape_single_quotes_in_object_names(url3)
escaped_url4 = Utils.odata_escape_single_quotes_in_object_names(url4)
escaped_url5 = Utils.odata_escape_single_quotes_in_object_names(url5)
self.assertEqual(
escaped_url,
"https://localhost:8099/api/v1/Dimensions('dime''nsion')/Hierarchies('hier''archy')/Elements('elem''ent')")
self.assertEqual(
escaped_url1,
"https://localhost:915/api/v1/TransactionLogEntries?$orderby=TimeStamp desc &$filter=Cube eq 'Test ''Case'' cube*'")
self.assertEqual(
escaped_url2,
"https://localhost:915/api/v1/TransactionLogEntries?$orderby=TimeStamp desc &$filter=Cube eq 'Test C_''ase cube'")
self.assertEqual(
escaped_url3,
"https://localhost:915/api/v1/TransactionLogEntries?$orderby=TimeStamp desc &$filter=Cube eq 'Test C9''as*''&e cube'")
self.assertEqual(
escaped_url4,
"https://localhost:915/api/v1/TransactionLogEntries?$orderby=TimeStamp desc &$filter=Cube eq 'Test C9''_ase cube'")
self.assertEqual(
escaped_url5,
"https://localhost:915/api/v1/TransactionLogEntries?$orderby=TimeStamp desc &$filter=Cube eq 'Test Case cube'")
def test_odata_escape_single_quotes_in_object_names_group(self):
url = "https://localhost:8099/api/v1/Groups('Gro'up')"
escaped_url = Utils.odata_escape_single_quotes_in_object_names(url)
self.assertEqual(
escaped_url,
"https://localhost:8099/api/v1/Groups('Gro''up')")
def test_odata_escape_single_quotes_in_object_names_user(self):
url = "https://localhost:8099/api/v1/Users('Us'er')"
escaped_url = Utils.odata_escape_single_quotes_in_object_names(url)
self.assertEqual(
escaped_url,
"https://localhost:8099/api/v1/Users('Us''er')")
def test_odata_escape_single_quotes_in_object_names_element(self):
url = "https://localhost:8099/api/v1/Dimensions('dimen'sion')/Hierarchies('hier'archy')/Elements('elem'ent')"
escaped_url = Utils.odata_escape_single_quotes_in_object_names(url)
self.assertEqual(
escaped_url,
"https://localhost:8099/api/v1/Dimensions('dimen''sion')/Hierarchies('hier''archy')/Elements('elem''ent')")
def test_odata_escape_single_quotes_in_object_names_custom_request_threads(self):
url = "https://localhost:8099/api/v1/Threads?$top=0&$filter=ObjectType eq 'Process' and " \
"ObjectName ne 'Process - Get Params REST'&$count=true"
escaped_url = Utils.odata_escape_single_quotes_in_object_names(url)
self.assertEqual(
escaped_url,
url)
@classmethod
def tearDownClass(cls):
cls.tm1.logout()
class TestTIObfuscatorMethods(unittest.TestCase):
tm1 = None
@classmethod
def setUpClass(cls):
# Namings
cls.expand_process_name = str(uuid.uuid4())
cls.expand_process_name_obf = str(uuid.uuid4())
cls.process_name = str(uuid.uuid4())
cls.process_name_obf = str(uuid.uuid4())
cls.dimension_name = str(uuid.uuid4())
cls.dimension_name_cloned = str(uuid.uuid4())
cls.cube_name = str(uuid.uuid4())
cls.cube_name_cloned = str(uuid.uuid4())
# Connect to TM1
cls.tm1 = TM1Service(**config['tm1srv01'])
# create process
prolog = "\r\nSaveDataAll;\r\nsText='abcABC';\r\n"
epilog = "SaveDataAll;"
cls.process = Process(
name=cls.process_name,
prolog_procedure=prolog,
epilog_procedure=epilog)
# create process with expand in TM1
if cls.tm1.processes.exists(cls.process.name):
cls.tm1.processes.delete(cls.process.name)
cls.tm1.processes.create(cls.process)
# create process with expand
prolog = "\r\nnRevenue = 20;\r\nsRevenue = EXPAND('%nrevenue%');\r\nIF(sRevenue @ <> '20.000');\r\n" \
"ProcessBreak;\r\nENDIF;"
cls.expand_process = Process(
name=cls.expand_process_name,
prolog_procedure=prolog)
# create process with expand in TM1
if cls.tm1.processes.exists(cls.expand_process.name):
cls.tm1.processes.delete(cls.expand_process.name)
cls.tm1.processes.create(cls.expand_process)
# create dimension that we clone through obfuscated bedrock as part of the test
if not cls.tm1.dimensions.exists(cls.dimension_name):
d = Dimension(cls.dimension_name)
h = Hierarchy(cls.dimension_name, cls.dimension_name)
h.add_element('Total Years', 'Consolidated')
h.add_element('No Year', 'Numeric')
for year in range(1989, 2040, 1):
h.add_element(str(year), 'Numeric')
h.add_edge('Total Years', str(year), 1)
d.add_hierarchy(h)
cls.tm1.dimensions.create(d)
# Create 2 Attributes through TI
ti_statements = ["AttrInsert('{}','','Previous Year', 'S')".format(cls.dimension_name),
"AttrInsert('{}','','Next Year', 'S');".format(cls.dimension_name)]
ti = ';'.join(ti_statements)
cls.tm1.processes.execute_ti_code(lines_prolog=ti)
# create }ElementAttribute values
cellset = {}
for year in range(1989, 2040, 1):
cellset[(str(year), 'Previous Year')] = year - 1
cellset[(str(year), 'Next Year')] = year + 1
cls.tm1.cubes.cells.write_values("}ElementAttributes_" + cls.dimension_name, cellset)
# create a simple cube to be cloned through bedrock
if not cls.tm1.cubes.exists(cls.cube_name):
cube = Cube(cls.cube_name, ["}Dimensions", "}Cubes"], "[]=S:'TM1py';")
cls.tm1.cubes.create(cube)
        # create bedrock processes if they don't exist
for bedrock in ("Bedrock.Dim.Clone", "Bedrock.Cube.Clone"):
if not cls.tm1.processes.exists(bedrock):
with open(Path(__file__).parent.joinpath("resources", bedrock + ".json"), "r") as file:
process = Process.from_json(file.read())
cls.tm1.processes.create(process)
def test_split_into_statements(self):
code = "sText1 = 'abcdefgh';\r\n" \
" nElem = 2;\r\n" \
" # dasjd; dasjdas '' qdawdas\r\n" \
"# daskldlaskjdla aksdlas;das \r\n" \
" # dasdwad\r\n" \
"sText2 = 'dasjnd;jkas''dasdas'';dasdas';\r\n" \
"SaveDataAll;"
code = TIObfuscator.remove_comment_lines(code)
statements = TIObfuscator.split_into_statements(code)
self.assertEqual(len(statements), 4)
def test_expand(self):
if self.tm1.processes.exists(self.expand_process_name_obf):
self.tm1.processes.delete(self.expand_process_name_obf)
process = self.tm1.processes.get(self.expand_process_name)
process_obf = TIObfuscator.obfuscate_process(process, self.expand_process_name_obf)
self.tm1.processes.create(process_obf)
self.tm1.processes.execute(process_obf.name, {})
def test_remove_generated_code(self):
code = "#****Begin: Generated Statements***\r\n" \
"DIMENSIONELEMENTINSERT('Employee','',V1,'s');\r\n" \
"DIMENSIONELEMENTINSERT('Employee','',V2,'s');\r\n" \
"DIMENSIONELEMENTINSERT('Employee','',V3,'s');\r\n" \
"DIMENSIONELEMENTINSERT('Employee','',V4,'s');\r\n" \
"#****End: Generated Statements****\r\n" \
"\r\n" \
"sText = 'test';"
code = TIObfuscator.remove_generated_code(code)
self.assertNotIn("#****Begin", code)
self.assertNotIn("DIMENSIONELEMENTINSERT", code)
self.assertNotIn("#****End", code)
self.assertIn("sText = 'test';", code)
def test_obfuscate_code(self):
if self.tm1.processes.exists(self.process_name_obf):
self.tm1.processes.delete(self.process_name_obf)
process_obf = TIObfuscator.obfuscate_process(self.process, self.process_name_obf)
self.tm1.processes.create(process_obf)
def test_bedrock_clone_dim(self):
if self.tm1.processes.exists("Bedrock.Dim.Clone.Obf"):
self.tm1.processes.delete("Bedrock.Dim.Clone.Obf")
p = self.tm1.processes.get("Bedrock.Dim.Clone")
p_obf = TIObfuscator.obfuscate_process(
process=p,
new_name='Bedrock.Dim.Clone.Obf')
self.tm1.processes.create(p_obf)
# call obfuscated process
parameters = {
"Parameters":
[
{"Name": "pSourceDim", "Value": self.dimension_name},
{"Name": "pTargetDim", "Value": self.dimension_name_cloned},
{"Name": "pAttr", "Value": "1"}
]
}
self.tm1.processes.execute("Bedrock.Dim.Clone.Obf", parameters)
def test_bedrock_clone_cube(self):
if self.tm1.processes.exists("Bedrock.Cube.Clone.Obf"):
self.tm1.processes.delete("Bedrock.Cube.Clone.Obf")
p = self.tm1.processes.get("Bedrock.Cube.Clone")
p_obf = TIObfuscator.obfuscate_process(process=p, new_name='Bedrock.Cube.Clone.Obf')
self.tm1.processes.create(p_obf)
# call obfuscated process
parameters = {
"Parameters":
[
{"Name": "pSourceCube", "Value": self.cube_name},
{"Name": "pTargetCube", "Value": self.cube_name_cloned},
{"Name": "pIncludeRules", "Value": "1"},
{"Name": "pIncludeData", "Value": "1"},
{"Name": "pDebug", "Value": "1"}
]
}
self.tm1.processes.execute("Bedrock.Cube.Clone.Obf", parameters)
@classmethod
def tearDownClass(cls):
        # delete all objects created for these tests
cls.tm1.processes.delete(cls.expand_process_name)
cls.tm1.processes.delete(cls.expand_process_name_obf)
cls.tm1.processes.delete(cls.process_name)
cls.tm1.processes.delete(cls.process_name_obf)
cls.tm1.processes.delete("Bedrock.Dim.Clone.Obf")
cls.tm1.processes.delete("Bedrock.Cube.Clone.Obf")
cls.tm1.dimensions.delete(cls.dimension_name)
cls.tm1.dimensions.delete(cls.dimension_name_cloned)
cls.tm1.cubes.delete(cls.cube_name)
cls.tm1.cubes.delete(cls.cube_name_cloned)
cls.tm1.logout()
if __name__ == '__main__':
unittest.main()
| mit |
rishikksh20/scikit-learn | sklearn/metrics/tests/test_ranking.py | 46 | 41270 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.exceptions import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
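# Illustrative check (not part of the original test suite): with
# y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8], three of the four
# positive/negative pairs are ordered correctly, so _auc returns 3 / 4 = 0.75.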
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
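# Illustrative check (not part of the original test suite): for the same toy
# data, y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8], the relevant
# documents sit at ranks 1 and 3, giving (1/1 + 2/3) / 2, i.e. roughly 0.83.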
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve starting at 0 and ending at 1,
    # even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no positive sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no negative sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
# This test was expanded (added scaled_down) in response to github
# issue #3864 (and others), where overly aggressive rounding was causing
# problems for users with very small y_score values
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled_up = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_scaled_down = roc_auc_score(y_true, 1e-6 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled_up)
assert_equal(roc_auc, roc_auc_scaled_down)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled_up = average_precision_score(y_true, 100 * probas_pred)
pr_auc_scaled_down = average_precision_score(y_true, 1e-6 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled_up)
assert_equal(pr_auc, pr_auc_scaled_down)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works for various cases
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied for rank 1 are both assigned rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
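        # Worked example (illustrative): for scores [0.5, 0.5, 0.2] the raw ranks
        # are [1, 1, 2]; the cumulative bincount maps both tied labels to rank 2
        # and the remaining label to rank 3.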
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels with a better rank
            # (i.e. a smaller rank value).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
vendi12/MemN2N-tableQA | demo/qa.py | 1 | 20171 | """
Demo of using Memory Network for question answering
"""
import glob
import os
import gzip
import sys
import pickle
import argparse
import numpy as np
# SV
import fasttext
from sklearn.metrics.pairwise import cosine_similarity
from config import BabiConfigJoint
from train_test import train, train_linear_start, test
from util import parse_babi_task, build_model, NWORDS, NSTORIES, NSENTENCES
# EMBEDDINGS_MODEL_PATH = '../fastText/result/fil9.bin'
EMBEDDINGS_MODEL_PATH = 'embeddings/fil9.bin'
SIM_FASTTEXT_THRESHOLD = 0.8
class MemN2N(object):
"""
MemN2N class
"""
def __init__(self, data_dir, model_file, dataset='sim'):
        # specify pre-trained models to load in the interface
        # and the sample dataset for evaluation: one of 'test', 'sim', 'synth' or 'table'
self.data_dir = data_dir
self.data_path = './data/%s_data_{}.txt' % dataset
self.model_file = './trained_model/memn2n_table_qa_model_%s.pklz' % dataset
# self.model_file = model_file
self.reversed_dict = None
self.memory = None
self.model = None
self.loss = None
self.general_config = None
# SV load model to embed OOV words
print("Loading word embeddings model")
self.word_model = fasttext.load_model(EMBEDDINGS_MODEL_PATH)
# SV keep word vectors for all the dictionary words
self.dict_vectors = {}
def save_model(self):
with gzip.open(self.model_file, "wb") as f:
print("Saving model to file %s ..." % self.model_file)
pickle.dump((self.reversed_dict, self.memory, self.model, self.loss, self.general_config), f)
def load_model(self):
# Check if model was loaded
if self.reversed_dict is None or self.memory is None or \
self.model is None or self.loss is None or self.general_config is None:
print("Loading MemNN-QA model from file %s ..." % self.model_file)
with gzip.open(self.model_file, "rb") as f:
self.reversed_dict, self.memory, self.model, self.loss, self.general_config = pickle.load(f)
def train(self):
"""
Train MemN2N model using training data for tasks.
"""
np.random.seed(42) # for reproducing
# assert self.data_dir is not None, "data_dir is not specified."
# print("Reading data from %s ..." % self.data_dir)
# Parse data
train_data_path = glob.glob(self.data_path.format('train'))
test_data_path = glob.glob(self.data_path.format('test'))
# Parse training data
# train_data_path = glob.glob('%s/qa*_*_train.txt' % self.data_dir)
# init dict with pre-trained vectors, e.g. from fastText
dictionary = {"nil": 0}
train_story, train_questions, train_qstory = parse_babi_task(train_data_path, dictionary, False)
# Parse test data just to expand the dictionary so that it covers all words in the test data too
# test_data_path = glob.glob('%s/qa*_*_test.txt' % self.data_dir)
parse_babi_task(test_data_path, dictionary, False)
# Get reversed dictionary mapping index to word
self.reversed_dict = dict((ix, w) for w, ix in dictionary.items())
# Construct model
self.general_config = BabiConfigJoint(train_story, train_questions, dictionary)
self.memory, self.model, self.loss = build_model(self.general_config)
# Train model
if self.general_config.linear_start:
train_linear_start(train_story, train_questions, train_qstory,
self.memory, self.model, self.loss, self.general_config)
else:
train(train_story, train_questions, train_qstory,
self.memory, self.model, self.loss, self.general_config)
# Save model
self.save_model()
def parse_babi_task(self, data_files, include_question):
""" Parse bAbI data. And expand the dictionary.
Args:
data_files (list): a list of data file's paths.
dictionary (dict): word's dictionary
include_question (bool): whether count question toward input sentence.
Returns:
A tuple of (story, questions, qstory):
story (3-D array)
[position of word in sentence, sentence index, story index] = index of word in dictionary
questions (2-D array)
                [0-13, question index], in which the first component is encoded as follows:
                    0 - story index
                    1 - index of the last sentence before the question
                    2 - index of the answer word in dictionary
                    3 to 12 - indices of supporting sentences
                    13 - line index
qstory (2-D array) question's indices within a story
[index of word in question, question index] = index of word in dictionary
"""
# Try to reserve spaces beforehand (large matrices for both 1k and 10k data sets)
# print NWORDS, NSENTENCES, len(data_files)
story = np.zeros((NWORDS, NSENTENCES, len(data_files) * NSTORIES), np.int16)
questions = np.zeros((14, len(data_files) * 10000), np.int16)
qstory = np.zeros((NWORDS, len(data_files) * 10000), np.int16)
# NOTE: question's indices are not reset when going through a new story
story_idx, question_idx, sentence_idx, max_words, max_sentences = -1, -1, -1, 0, 0
# Mapping line number (within a story) to sentence's index (to support the flag include_question)
mapping = None
for fp in data_files:
with open(fp) as f:
for line_idx, line in enumerate(f):
line = line.rstrip().lower()
words = line.split()
# Story begins
if words[0] == '1':
story_idx += 1
sentence_idx = -1
mapping = []
# FIXME: This condition makes the code more fragile!
if '?' not in line:
is_question = False
sentence_idx += 1
else:
is_question = True
question_idx += 1
questions[0, question_idx] = story_idx
questions[1, question_idx] = sentence_idx
if include_question:
sentence_idx += 1
mapping.append(sentence_idx)
# Skip substory index
for k in range(1, len(words)):
w = words[k]
if w.endswith('.') or w.endswith('?'):
w = w[:-1]
if w not in self.general_config.dictionary:
self.general_config.dictionary[w] = len(self.general_config.dictionary)
if max_words < k:
max_words = k
# print sentence_idx, story_idx
if not is_question:
# look up word in a dictionary
story[k - 1, sentence_idx, story_idx] = self.general_config.dictionary[w]
else:
qstory[k - 1, question_idx] = self.general_config.dictionary[w]
if include_question:
story[k - 1, sentence_idx, story_idx] = self.general_config.dictionary[w]
# NOTE: Punctuation is already removed from w
if words[k].endswith('?'):
answer = words[k + 1]
if answer not in self.general_config.dictionary:
self.general_config.dictionary[answer] = len(self.general_config.dictionary)
questions[2, question_idx] = self.general_config.dictionary[answer]
# Indices of supporting sentences
for h in range(k + 2, len(words)):
questions[1 + h - k, question_idx] = mapping[int(words[h]) - 1]
questions[-1, question_idx] = line_idx
break
if max_sentences < sentence_idx + 1:
max_sentences = sentence_idx + 1
story = story[:max_words, :max_sentences, :(story_idx + 1)]
questions = questions[:, :(question_idx + 1)]
qstory = qstory[:max_words, :(question_idx + 1)]
print questions[1, 0]
return story, questions, qstory
def get_story_texts(self, test_story, test_questions, test_qstory,
question_idx, story_idx, last_sentence_idx):
"""
Get text of question, its corresponding fact statements.
"""
train_config = self.general_config.train_config
enable_time = self.general_config.enable_time
max_words = train_config["max_words"] \
if not enable_time else train_config["max_words"] - 1
story = [[self.reversed_dict[test_story[word_pos, sent_idx, story_idx]]
for word_pos in range(max_words)]
for sent_idx in range(last_sentence_idx + 1)]
# print story
question = [self.reversed_dict[test_qstory[word_pos, question_idx]]
for word_pos in range(max_words)]
story_txt = [" ".join([w.decode('latin-1') for w in sent if w != "nil"]) for sent in story]
question_txt = " ".join([w.decode('latin-1') for w in question if w != "nil"])
correct_answer = self.reversed_dict[test_questions[2, question_idx]].decode('latin-1')
return story_txt, question_txt, correct_answer
def predict_answer(self, test_story, test_questions, test_qstory,
question_idx, story_idx, last_sentence_idx,
user_question=''):
# Get configuration
nhops = self.general_config.nhops
train_config = self.general_config.train_config
batch_size = self.general_config.batch_size
dictionary = self.general_config.dictionary
enable_time = self.general_config.enable_time
max_words = train_config["max_words"] \
if not enable_time else train_config["max_words"] - 1
input_data = np.zeros((max_words, batch_size), np.float32)
# init with 0
input_data[:] = dictionary["nil"]
self.memory[0].data[:] = dictionary["nil"]
# Check if user provides questions and it's different from suggested question
_, suggested_question, _ = self.get_story_texts(test_story, test_questions, test_qstory,
question_idx, story_idx, last_sentence_idx)
user_question_provided = user_question != '' and user_question != suggested_question
encoded_user_question = None
dis_question = []
# new question different from test data
if user_question_provided:
# TODO seq2seq translation/projection model
# print("User question = '%s'" % user_question)
user_question = user_question.strip()
if user_question[-1] == '?':
user_question = user_question[:-1]
qwords = user_question.rstrip().lower().split() # skip '?'
# Encoding
encoded_user_question = np.zeros(max_words)
encoded_user_question[:] = dictionary["nil"]
qindex = 0
for ix, w in enumerate(qwords):
if w in dictionary:
print w
encoded_user_question[qindex] = dictionary[w]
qindex += 1
else:
print("WARNING - The word '%s' is not in dictionary." % w)
# SV deal with OOV words!
# look it up in fasttext
word_vector = self.word_model[w]
print 'fastText embedding:', word_vector
# resolve it with one of the vocabulary words
# iterate over and compare vector with each word in the dictionary
# init nn search
# TODO optimize cosine_similarity comparison on a matrix
nn = None
max_cosine = 0
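                    # Note: self.dict_vectors is expected to map dictionary words to
                    # their fastText vectors; it is initialised empty in __init__, so
                    # it must be populated (e.g. from self.word_model) before this
                    # nearest-neighbour search can return a match.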
for word, dict_vector in self.dict_vectors.items():
cosine = cosine_similarity(word_vector, dict_vector)[0][0]
if cosine > max_cosine:
nn = word
max_cosine = cosine
if max_cosine > SIM_FASTTEXT_THRESHOLD:
encoded_user_question[qindex] = dictionary[nn]
qindex += 1
# print w + ' recognized as ' + nn
dis_question.append(w.decode('latin-1') + ' recognized as ' + nn.decode('latin-1') + ' ' + "%.2f" % max_cosine)
# dis_question.append(w + ' recognized as ' + nn)
else:
dis_question.append(w.decode('latin-1') + ' is not recognized and ignored')
# Input data and data for the 1st memory cell
# Here we duplicate input_data to fill the whole batch
for b in range(batch_size):
d = test_story[:, :(1 + last_sentence_idx), story_idx]
offset = max(0, d.shape[1] - train_config["sz"])
d = d[:, offset:]
self.memory[0].data[:d.shape[0], :d.shape[1], b] = d
if enable_time:
self.memory[0].data[-1, :d.shape[1], b] = \
np.arange(d.shape[1])[::-1] + len(dictionary) # time words
if user_question_provided:
input_data[:test_qstory.shape[0], b] = encoded_user_question
else:
input_data[:test_qstory.shape[0], b] = test_qstory[:, question_idx]
        # Data for the remaining memory cells
for i in range(1, nhops):
self.memory[i].data = self.memory[0].data
# Run model to predict answer
out = self.model.fprop(input_data)
memory_probs = np.array([self.memory[i].probs[:(last_sentence_idx + 1), 0] for i in range(nhops)])
# Get answer for the 1st question since all are the same
pred_answer_idx = out[:, 0].argmax()
pred_prob = out[pred_answer_idx, 0]
return pred_answer_idx, pred_prob, memory_probs, dis_question
def train_model(data_dir, model_file):
memn2n = MemN2N(data_dir, model_file)
memn2n.train()
def run_console_demo(data_dir, model_file):
"""
Console-based demo
"""
memn2n = MemN2N(data_dir, model_file)
# Try to load model
memn2n.load_model()
train_dict_n = len(memn2n.general_config.dictionary)
print 'Dictionary size', len(memn2n.general_config.dictionary)
# Read test data
test_data_path = glob.glob('./data/sim_data_test.txt')
# test_data_path = glob.glob(memn2n.data_path.format('test'))
# load different dataset with samples
# test_data_path = glob.glob('./data/table_data_{}.txt'.format('test'))
print("Reading test data from %s ..." % test_data_path)
# print len(memn2n.general_config.dictionary)
test_story, test_questions, test_qstory = \
memn2n.parse_babi_task(test_data_path, False)
# SV expand reversed_dict with test data
print 'Dictionary size', len(memn2n.general_config.dictionary)
oov_n = len(memn2n.general_config.dictionary) - train_dict_n
# Get reversed dictionary mapping index to word
memn2n.reversed_dict = dict((ix, w) for w, ix in memn2n.general_config.dictionary.items())
# print memn2n.memory.emb_out
# memn2n.memory.emb_out
# memn2n.test(test_story, test_questions, test_qstory)
test(test_story, test_questions, test_qstory, memn2n.memory, memn2n.model, memn2n.loss, memn2n.general_config)
# while True:
# # Pick a random question
# question_idx = np.random.randint(test_questions.shape[1])
# story_idx = test_questions[0, question_idx]
# last_sentence_idx = test_questions[1, question_idx]
# # Get story and question
# story_txt, question_txt, correct_answer = memn2n.get_story_texts(test_story, test_questions, test_qstory,
# question_idx, story_idx, last_sentence_idx)
# print("* Story:")
# print("\n\t".join(story_txt))
# print("\n* Suggested question:\n\t%s?" % question_txt)
# while True:
# user_question = raw_input("Your question (press Enter to use the suggested question):\n\t")
# pred_answer_idx, pred_prob, memory_probs = \
# memn2n.predict_answer(test_story, test_questions, test_qstory,
# question_idx, story_idx, last_sentence_idx,
# user_question)
# pred_answer = memn2n.reversed_dict[pred_answer_idx]
# print("* Answer: '%s', confidence score = %.2f%%" % (pred_answer, 100. * pred_prob))
# if user_question == '':
# if pred_answer == correct_answer:
# print(" Correct!")
# else:
# print(" Wrong. The correct answer is '%s'" % correct_answer)
# print("\n* Explanation:")
# print("\t".join(["Memory %d" % (i + 1) for i in range(len(memory_probs))]) + "\tText")
# for sent_idx, sent_txt in enumerate(story_txt):
# prob_output = "\t".join(["%.3f" % mem_prob for mem_prob in memory_probs[:, sent_idx]])
# print("%s\t%s" % (prob_output, sent_txt))
# asking_another_question = raw_input("\nDo you want to ask another question? [y/N] ")
# if asking_another_question == '' or asking_another_question.lower() == 'n': break
# will_continue = raw_input("Do you want to continue? [Y/n] ")
# if will_continue != '' and will_continue.lower() != 'y': break
# print("=" * 70)
def run_web_demo(data_dir, model_file):
from demo.web import webapp
webapp.init(data_dir, model_file)
webapp.run()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--data-dir", default="data/tasks_1-20_v1-2/en",
help="path to dataset directory (default: %(default)s)")
# parser.add_argument("-m", "--model-file", default="trained_model/memn2n_model.pklz",
parser.add_argument("-m", "--model-file", default="trained_model/memn2n_table_qa_model.pklz",
help="model file (default: %(default)s)")
group = parser.add_mutually_exclusive_group()
group.add_argument("-train", "--train", action="store_true",
help="train model (default: %(default)s)")
group.add_argument("-console", "--console-demo", action="store_true",
help="run console-based demo (default: %(default)s)")
group.add_argument("-web", "--web-demo", action="store_true", default=True,
help="run web-based demo (default: %(default)s)")
args = parser.parse_args()
# if not os.path.exists(args.data_dir):
# print("The data directory '%s' does not exist. Please download it first." % args.data_dir)
# sys.exit(1)
if args.train:
train_model(args.data_dir, args.model_file)
elif args.console_demo:
run_console_demo(args.data_dir, args.model_file)
else:
run_web_demo(args.data_dir, args.model_file)
| bsd-3-clause |
wzbozon/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10-fold cross-validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different from what we see in the decision
# surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
klocey/hydrobide | tools/SADfits/SADfits.py | 9 | 3639 | from __future__ import division, print_function
import matplotlib.pyplot as plt
import sys
import os
from random import shuffle
import numpy as np
########### PATHS ##############################################################
mydir = os.path.expanduser("~/GitHub/residence-time")
tools = os.path.expanduser(mydir + "/tools")
sys.path.append(tools + "/DiversityTools/macroeco_distributions")
import macroeco_distributions as md
sys.path.append(tools + "/DiversityTools/distributions")
import distributions as dist
sys.path.append(tools + "/DiversityTools/macroecotools")
import macroecotools as mct
sys.path.append(tools + "/metrics")
import metrics
sys.path.append(tools + "/DiversityTools/mete")
import mete
#sys.path.append(tools + "/pln")
#import pln
from scipy.stats.kde import gaussian_kde
from macroeco_distributions import pln, pln_solver
from numpy import empty
def get_kdens_choose_kernel(_list,kernel):
""" Finds the kernel density function across a sample of SADs """
density = gaussian_kde(_list)
n = len(_list)
xs = np.linspace(min(_list),max(_list),n)
#xs = np.linspace(0.0,1.0,n)
density.covariance_factor = lambda : kernel
density._compute_covariance()
D = [xs,density(xs)]
return D
def get_rad_pln(S, mu, sigma, lower_trunc = True):
"""Obtain the predicted RAD from a Poisson lognormal distribution"""
abundance = list(empty([S]))
rank = range(1, int(S) + 1)
cdf_obs = [(rank[i]-0.5) / S for i in range(0, int(S))]
j = 0
cdf_cum = 0
i = 1
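# Walk abundance values i upward, accumulating the Poisson-lognormal CDF; each
# time the cumulative mass passes the next rank quantile in cdf_obs, that rank
# is assigned abundance i. Ranks fill from rarest to most abundant (index 0 gets
# the smallest abundance), then the list is reversed at the end so it reads from
# most to least abundant.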
while j < S:
cdf_cum += pln.pmf(i, mu, sigma, lower_trunc)
while cdf_cum >= cdf_obs[j]:
abundance[j] = i
j += 1
if j == S:
abundance.reverse()
return abundance
i += 1
def get_rad_from_obs(ab, dist):
mu, sigma = pln_solver(ab)
pred_rad = get_rad_pln(len(ab), mu, sigma)
return pred_rad
data = mydir + '/results/simulated_data/protected/RAD-Data.csv'
RADs = []
with open(data) as f:
for d in f:
d = list(eval(d))
sim = d.pop(0)
ct = d.pop(0)
if len(d) >= 10:
d = sorted(d, reverse=True)
RADs.append(d)
print('Number of RADs:', len(RADs))
mete_r2s = []
zipf_r2s = []
pln_r2s = []
shuffle(RADs)
for i, obs in enumerate(RADs):
N = int(sum(obs))
S = int(len(obs))
print(i, N, S, len(pln_r2s))
if S >= 10 and N > 50:
if N < 10000:
result = mete.get_mete_rad(S, N)
predRAD = result[0]
mete_r2 = mct.obs_pred_rsquare(np.array(obs), np.array(predRAD))
mete_r2s.append(mete_r2)
#zipf_pred = dist.zipf(obs)
#predRAD = zipf_pred.from_cdf()
#zipf_r2 = mct.obs_pred_rsquare(np.array(obs), np.array(predRAD))
#zipf_r2s.append(zipf_r2)
predRAD = get_rad_from_obs(obs, 'pln')
pln_r2 = mct.obs_pred_rsquare(np.array(obs), np.array(predRAD))
pln_r2s.append(pln_r2)
if len(pln_r2s) > 200: break
fig = plt.figure(111)
kernel = 0.5
D = get_kdens_choose_kernel(mete_r2s, kernel)
plt.plot(D[0],D[1],color = '0.3', lw=3, alpha = 0.99,label= 'METE')
#D = get_kdens_choose_kernel(zipf_r2s, kernel)
#plt.plot(D[0],D[1],color = 'c', lw=3, alpha = 0.99,label= 'Zipf')
D = get_kdens_choose_kernel(pln_r2s, kernel)
plt.plot(D[0],D[1],color = 'm', lw=3, alpha = 0.99, label= 'PLN')
plt.xlim(0.0, 1)
plt.legend(loc=2, fontsize=16)
plt.xlabel('$r$'+r'$^{2}$', fontsize=22)
plt.ylabel('$density$', fontsize=22)
plt.savefig(mydir + '/results/figures/SADfits.png', dpi=600, bbox_inches = "tight")
plt.close()
| mit |
Unidata/MetPy | v1.0/_downloads/0f93e682cc461be360e2fd037bf1fb7e/sigma_to_pressure_interpolation.py | 1 | 3485 | # Copyright (c) 2017,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
===============================
Sigma to Pressure Interpolation
===============================
By using `metpy.interpolate.log_interpolate_1d`, data with sigma as the vertical coordinate can be
interpolated to isobaric coordinates.
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from netCDF4 import Dataset, num2date
from metpy.cbook import get_test_data
from metpy.interpolate import log_interpolate_1d
from metpy.plots import add_metpy_logo, add_timestamp
from metpy.units import units
######################################
# **Data**
#
# The data for this example comes from the outer domain of a WRF-ARW model forecast
# initialized at 1200 UTC on 03 June 1980. Model data courtesy Matthew Wilson, Valparaiso
# University Department of Geography and Meteorology.
data = Dataset(get_test_data('wrf_example.nc', False))
lat = data.variables['lat'][:]
lon = data.variables['lon'][:]
time = data.variables['time']
vtimes = num2date(time[:], time.units)
temperature = units.Quantity(data.variables['temperature'][:], 'degC')
pres = units.Quantity(data.variables['pressure'][:], 'Pa')
hgt = units.Quantity(data.variables['height'][:], 'meter')
####################################
# Array of desired pressure levels
plevs = [700.] * units.hPa
#####################################
# **Interpolate The Data**
#
# Now that the data is ready, we can interpolate to the new isobaric levels. The data is
# interpolated from the irregular pressure values for each sigma level to the new input
# mandatory isobaric levels. `log_interpolate_1d` will interpolate over a specified dimension
# with the `axis` argument. In this case, `axis=1` will correspond to interpolation on the
# vertical axis. The interpolated data is output in a list, so we will pull out each
# variable for plotting.
height, temp = log_interpolate_1d(plevs, pres, hgt, temperature, axis=1)
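# (Hedged note, not in the original example: log_interpolate_1d accepts several
# target levels at once, e.g. plevs = [850., 700., 500.] * units.hPa, in which
# case the level dimension of the returned arrays indexes those pressure levels.)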
####################################
# **Plotting the Data for 700 hPa.**
# Set up our projection
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
# Set the forecast hour
FH = 1
# Create the figure and grid for subplots
fig = plt.figure(figsize=(17, 12))
add_metpy_logo(fig, 470, 320, size='large')
# Plot 700 hPa
ax = plt.subplot(111, projection=crs)
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES, linewidth=0.5)
# Plot the heights
cs = ax.contour(lon, lat, height[FH, 0, :, :], transform=ccrs.PlateCarree(),
colors='k', linewidths=1.0, linestyles='solid')
cs.clabel(fontsize=10, inline=1, inline_spacing=7, fmt='%i', rightside_up=True,
use_clabeltext=True)
# Contour the temperature
cf = ax.contourf(lon, lat, temp[FH, 0, :, :], range(-20, 20, 1), cmap=plt.cm.RdBu_r,
transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', aspect=65, shrink=0.5, pad=0.05,
extendrect='True')
cb.set_label('Celsius', size='x-large')
ax.set_extent([-106.5, -90.4, 34.5, 46.75], crs=ccrs.PlateCarree())
# Make the axis title
ax.set_title(f'{plevs[0]:~.0f} Heights (m) and Temperature (C)', loc='center', fontsize=10)
# Set the figure title
fig.suptitle(f'WRF-ARW Forecast VALID: {vtimes[FH]} UTC', fontsize=14)
add_timestamp(ax, vtimes[FH], y=0.02, high_contrast=True)
plt.show()
| bsd-3-clause |
peterfpeterson/mantid | qt/python/mantidqt/gui_helper.py | 3 | 5994 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy.QtWidgets import (QApplication) # noqa
from qtpy import QtCore, QtGui
import matplotlib
import sys
import os
try:
from mantid import __version__ as __mtd_version
from mantid import _bindir as __mtd_bin_dir
# convert to major.minor
__mtd_version = '.'.join(__mtd_version.split(".")[:2])
except ImportError: # mantid not found
__mtd_version = ''
__mtd_bin_dir=''
def set_matplotlib_backend():
'''MUST be called before anything tries to use matplotlib.
This will set the backend if it hasn't been set already. It also returns
the name of the backend, which is the name to use when importing the
correct matplotlib widgets.'''
backend = matplotlib.get_backend()
if backend.startswith('module://'):
if backend.endswith('qt4agg'):
backend = 'Qt4Agg'
elif backend.endswith('workbench') or backend.endswith('qt5agg'):
backend = 'Qt5Agg'
else:
from qtpy import PYQT4, PYQT5 # noqa
if PYQT5:
backend = 'Qt5Agg'
elif PYQT4:
backend = 'Qt4Agg'
else:
raise RuntimeError('Do not know which matplotlib backend to set')
matplotlib.use(backend)
return backend
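# Hedged usage sketch (not part of this module): the returned name can drive the
# widget import, e.g.
#     backend = set_matplotlib_backend()
#     if backend == 'Qt5Agg':
#         from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
#     else:
#         from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg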
def get_qapplication():
''' Example usage:
app, within_mantid = get_qapplication()
reducer = eventFilterGUI.MainWindow() # the main ui class in this file
reducer.show()
if not within_mantid:
sys.exit(app.exec_())'''
app = QApplication.instance()
if app:
return app, app.applicationName().lower().startswith('mantid')
else:
return QApplication(sys.argv), False
def __to_external_url(interface_name: str, section: str, external_url: str) -> QtCore.QUrl:
if not external_url:
template = 'http://docs.mantidproject.org/nightly/interfaces/{}/{}.html'
external_url = template.format(section, interface_name)
return QtCore.QUrl(external_url)
def __to_qthelp_url(interface_name: str, section: str, qt_url: str) -> str:
if qt_url:
return qt_url
else:
template = 'qthelp://org.sphinx.mantidproject.{}/doc/interfaces/{}/{}.html'
return template.format(__mtd_version, section, interface_name)
def __get_collection_file(collection_file: str) -> str:
if not collection_file:
if not __mtd_bin_dir:
return 'HELP COLLECTION FILE NOT FOUND'
else:
collection_file = os.path.join(__mtd_bin_dir, '../docs/qthelp/MantidProject.qhc')
return os.path.abspath(collection_file)
def show_interface_help(mantidplot_name, assistant_process, area: str='',
collection_file: str='',
qt_url: str='', external_url: str=""):
''' Shows the help page for a custom interface
@param mantidplot_name: used by showCustomInterfaceHelp
@param assistant_process: needs to be started/closed from outside (see example below)
@param collection_file: qhc file containing the help in the format used by qtassistant. The default is
``mantid._bindir + '../docs/qthelp/MantidProject.qhc'``
@param qt_url: location of the help in the qth file. The default value is
``qthelp://org.sphinx.mantidproject.{mtdversion}/doc/interfaces/{mantidplot_name}.html``.
@param external_url: location of external page to be displayed in the default browser. The default value is
``http://docs.mantidproject.org/nightly/interfaces/framework/{mantidplot_name}.html``
Example using defaults:
#in the __init__ function of the GUI add:
self.assistant_process = QtCore.QProcess(self)
self.mantidplot_name='DGS Planner'
#add a help function in the GUI
def help(self):
show_interface_help(self.mantidplot_name,
self.assistant_process)
#make sure you close the qtassistant when the GUI is closed
def closeEvent(self, event):
self.assistant_process.close()
self.assistant_process.waitForFinished()
event.accept()
'''
try:
# try using built-in help in mantid
import mantidqt
mantidqt.interfacemanager.InterfaceManager().showCustomInterfaceHelp(mantidplot_name, area)
except: #(ImportError, ModuleNotFoundError) raises the wrong type of error
# built-in help failed, try external qtassistant then give up and launch a browser
# cleanup previous version
assistant_process.close()
assistant_process.waitForFinished()
# where to expect qtassistant
helpapp = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.BinariesPath) + QtCore.QDir.separator()
helpapp += 'assistant'
collection_file = __get_collection_file(collection_file)
if os.path.isfile(helpapp) and os.path.isfile(collection_file):
# try to find the collection file and launch qtassistant
args = ['-enableRemoteControl',
'-collectionFile', collection_file,
'-showUrl', __to_qthelp_url(mantidplot_name, area, qt_url)]
assistant_process.close()
assistant_process.waitForFinished()
assistant_process.start(helpapp, args)
else:
# give up and open a URL in the default browser
openUrl=QtGui.QDesktopServices.openUrl
sysenv=QtCore.QProcessEnvironment.systemEnvironment()
ldp=sysenv.value('LD_PRELOAD')
if ldp:
del os.environ['LD_PRELOAD']
# create a url to the help in the default location
openUrl(__to_external_url(mantidplot_name, area, external_url))
if ldp:
os.environ['LD_PRELOAD']=ldp
| gpl-3.0 |
CCI-Tools/ect-core | test/ops/test_index.py | 2 | 15866 | """
Tests for index operations
"""
from unittest import TestCase
import os
import sys
from datetime import datetime
import tempfile
import shutil
from contextlib import contextmanager
import itertools
import xarray as xr
import pandas as pd
import numpy as np
from cate.ops import index
from cate.core.op import OP_REGISTRY
from cate.util.misc import object_to_qualified_name
def assert_dataset_equal(expected, actual):
# this method is functionally equivalent to
# `assert expected == actual`, but it checks each aspect
# of equality separately for easier debugging
assert expected.equals(actual), (expected, actual)
_counter = itertools.count()
ON_WIN = sys.platform == 'win32'
@contextmanager
def create_tmp_file():
tmp_dir = tempfile.mkdtemp()
path = os.path.join(tmp_dir, 'tmp_file_{}.nc'.format(next(_counter)))
try:
yield path
finally:
try:
shutil.rmtree(tmp_dir)
except OSError:
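# On Windows the temporary netCDF file can still be locked by the library
# that wrote it, so the cleanup failure is ignored there.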
if not ON_WIN:
raise
class TestEnsoNino34(TestCase):
def test_nominal(self):
"""
Test ENSO index calculation using Nino34 region
"""
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': ([datetime(2001, x, 1) for x in range(1, 13)]
+ [datetime(2002, x, 1) for x in range(1, 13)])})
lta = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(1, 13)]})
lta = 2 * lta
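# The dataset is all ones while the long-term average is all twos, so the
# anomaly is -1 everywhere and any running mean of it is also -1, which is
# what the expected frame below encodes.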
expected_time = ([datetime(2001, x, 1) for x in range(3, 13)]
+ [datetime(2002, x, 1) for x in range(1, 11)])
expected = pd.DataFrame(data=(np.ones([20]) * -1),
columns=['ENSO N3.4 Index'],
index=expected_time)
with create_tmp_file() as tmp_file:
lta.to_netcdf(tmp_file)
actual = index.enso_nino34(dataset, 'first', tmp_file)
self.assertTrue(expected.equals(actual))
def test_threshold(self):
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': ([datetime(2001, x, 1) for x in range(1, 13)]
+ [datetime(2002, x, 1) for x in range(1, 13)])})
lta1 = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 4])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 4])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(1, 5)]})
lta2 = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 4]) * 2),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 4]) * 2),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(5, 9)]})
lta3 = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 4]) * 0),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 4]) * 0),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(9, 13)]})
lta = xr.concat([lta1, lta2, lta3], dim='time')
expected_time = ([datetime(2001, x, 1) for x in range(3, 13)]
+ [datetime(2002, x, 1) for x in range(1, 11)])
data = [-0.2, -0.4, -0.6, -0.8, -0.6, -0.2, 0.2, 0.6, 0.8, 0.6, 0.4,
0.2, -0.2, -0.4, -0.6, -0.8, -0.6, -0.2, 0.2, 0.6]
expected = pd.DataFrame(data=data,
columns=['ENSO N3.4 Index'],
index=expected_time)
expected['El Nino'] = pd.Series(np.zeros([20], dtype=bool),
index=expected.index)
expected['La Nina'] = pd.Series(np.zeros([20], dtype=bool),
index=expected.index)
expected.loc[7:10, 'El Nino'] = True
expected.loc[19:20, 'El Nino'] = True
expected.loc[2:5, 'La Nina'] = True
expected.loc[14:17, 'La Nina'] = True
with create_tmp_file() as tmp_file:
lta.to_netcdf(tmp_file)
actual = index.enso_nino34(dataset, 'first', tmp_file,
threshold=0.5)
print(expected)
print(actual)
self.assertTrue(expected.equals(actual))
def test_registered(self):
reg_op = OP_REGISTRY.get_op(object_to_qualified_name(index.enso_nino34))
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': ([datetime(2001, x, 1) for x in range(1, 13)]
+ [datetime(2002, x, 1) for x in range(1, 13)])})
lta = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(1, 13)]})
lta = 2 * lta
expected_time = ([datetime(2001, x, 1) for x in range(3, 13)]
+ [datetime(2002, x, 1) for x in range(1, 11)])
expected = pd.DataFrame(data=(np.ones([20]) * -1),
columns=['ENSO N3.4 Index'],
index=expected_time)
with create_tmp_file() as tmp_file:
lta.to_netcdf(tmp_file)
actual = reg_op(ds=dataset, var='first', file=tmp_file)
self.assertTrue(expected.equals(actual))
class TestEnso(TestCase):
def test_nominal(self):
"""
Test nominal execution of the generic ENSO Index calculation operation
"""
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': ([datetime(2001, x, 1) for x in range(1, 13)] + [datetime(2002, x, 1) for x in range(1, 13)])})
lta = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(1, 13)]})
lta = 2 * lta
expected_time = ([datetime(2001, x, 1) for x in range(3, 13)]
+ [datetime(2002, x, 1) for x in range(1, 11)])
expected = pd.DataFrame(data=(np.ones([20]) * -1),
columns=['ENSO N3 Index'],
index=expected_time)
with create_tmp_file() as tmp_file:
lta.to_netcdf(tmp_file)
actual = index.enso(dataset, 'first', tmp_file, region='N3')
self.assertTrue(expected.equals(actual))
def test_antimeridian(self):
"""
Test execution with N4 region that crosses the antimeridian
"""
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': ([datetime(2001, x, 1) for x in range(1, 13)]
+ [datetime(2002, x, 1) for x in range(1, 13)])})
lta = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(1, 13)]})
lta = 2 * lta
expected_time = ([datetime(2001, x, 1) for x in range(3, 13)]
+ [datetime(2002, x, 1) for x in range(1, 11)])
expected = pd.DataFrame(data=(np.ones([20]) * -1),
columns=['ENSO N4 Index'],
index=expected_time)
with create_tmp_file() as tmp_file:
lta.to_netcdf(tmp_file)
actual = index.enso(dataset, 'first', tmp_file, region='N4')
self.assertTrue(expected.equals(actual))
def test_custom_region(self):
"""
Test execution with a generic WKT polygon
"""
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': ([datetime(2001, x, 1) for x in range(1, 13)]
+ [datetime(2002, x, 1) for x in range(1, 13)])})
lta = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(1, 13)]})
lta = 2 * lta
expected_time = ([datetime(2001, x, 1) for x in range(3, 13)]
+ [datetime(2002, x, 1) for x in range(1, 11)])
expected = pd.DataFrame(data=(np.ones([20]) * -1),
columns=['ENSO Index over POLYGON '
'((-141.15234375 3.513421045640057, -129.0234375 6.839169626342807,'
' -102.65625 6.489983332670652, -90.703125 -3.688855143147035, -110'
'.21484375 -13.06877673435769, -141.6796875 -6.31529853833002, -141'
'.15234375 3.513421045640057))'],
index=expected_time)
region = str('POLYGON((-141.15234375 3.513421045640057,-129.0234375'
' 6.839169626342807,-102.65625 6.4899833326706515,-90.703125 '
'-3.6888551431470353,-110.21484375 -13.068776734357693,'
'-141.6796875 -6.31529853833002,-141.15234375 '
'3.513421045640057))')
with create_tmp_file() as tmp_file:
lta.to_netcdf(tmp_file)
actual = index.enso(dataset, 'first', tmp_file, region='custom',
custom_region=region)
self.assertTrue(expected.equals(actual))
# Test a situation where the user forgets to provide the custom region
with self.assertRaises(ValueError) as err:
index.enso(dataset, 'first', 'dummy/file.nc', region='custom')
self.assertIn('No region', str(err.exception))
def test_registered(self):
"""
Test execution as a registered operation.
"""
reg_op = OP_REGISTRY.get_op(object_to_qualified_name(index.enso))
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': ([datetime(2001, x, 1) for x in range(1, 13)]
+ [datetime(2002, x, 1) for x in range(1, 13)])})
lta = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(1, 13)]})
lta = 2 * lta
expected_time = ([datetime(2001, x, 1) for x in range(3, 13)]
+ [datetime(2002, x, 1) for x in range(1, 11)])
expected = pd.DataFrame(data=(np.ones([20]) * -1),
columns=['ENSO N3 Index'],
index=expected_time)
with create_tmp_file() as tmp_file:
lta.to_netcdf(tmp_file)
actual = reg_op(ds=dataset, var='first', file=tmp_file, region='N3')
self.assertTrue(expected.equals(actual))
class TestOni(TestCase):
def test_nominal(self):
"""
Test nominal ONI Index calculation execution
"""
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': ([datetime(2001, x, 1) for x in range(1, 13)]
+ [datetime(2002, x, 1) for x in range(1, 13)])})
lta = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(1, 13)]})
lta = 2 * lta
expected_time = ([datetime(2001, x, 1) for x in range(2, 13)]
+ [datetime(2002, x, 1) for x in range(1, 12)])
expected = pd.DataFrame(data=(np.ones([22]) * -1),
columns=['ONI Index'],
index=expected_time)
with create_tmp_file() as tmp_file:
lta.to_netcdf(tmp_file)
actual = index.oni(dataset, 'first', tmp_file)
self.assertTrue(expected.equals(actual))
def test_registered(self):
"""
Test nominal execution of ONI Index calculation, as a registered
operation.
"""
reg_op = OP_REGISTRY.get_op(object_to_qualified_name(index.oni))
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': ([datetime(2001, x, 1) for x in range(1, 13)]
+ [datetime(2002, x, 1) for x in range(1, 13)])})
lta = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(1, 13)]})
lta = 2 * lta
expected_time = ([datetime(2001, x, 1) for x in range(2, 13)]
+ [datetime(2002, x, 1) for x in range(1, 12)])
expected = pd.DataFrame(data=(np.ones([22]) * -1),
columns=['ONI Index'],
index=expected_time)
with create_tmp_file() as tmp_file:
lta.to_netcdf(tmp_file)
actual = reg_op(ds=dataset, var='first', file=tmp_file)
self.assertTrue(expected.equals(actual))
| mit |
pompiduskus/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 130 | 50966 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
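# Hedged usage sketch for ``partial_fit`` above (not part of this class): for
# out-of-core learning the full label set must be passed on the first call, e.g.
#     classes = np.unique(y_all)
#     for X_batch, y_batch in batches:
#         clf.partial_fit(X_batch, y_batch, classes=classes)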
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
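            # Switch to the averaged weights once the number of samples seen
            # (self.t_ - 1) has reached the ``average`` threshold; until then
            # keep exposing the plain (standard) SGD solution.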
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
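# Illustrative sketch (not part of the original module): the ``average``
# parameter documented above switches ``coef_`` to the averaged SGD weights
# once enough samples have been seen. The helper below is a hedged usage
# example on synthetic data; it is only defined here, never called at import.
def _example_sgd_average_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = rng.randn(20)
    # With average=10, averaging starts after 10 samples have been seen.
    reg = SGDRegressor(average=10, n_iter=5)
    reg.fit(X, y)
    return reg.coef_, reg.intercept_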
| bsd-3-clause |
ldirer/scikit-learn | examples/covariance/plot_sparse_cov.py | 29 | 5079 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model, is
chosen by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
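# Added sketch (not in the original example): quantify the statement above that
# the l1 estimator learns a sparse precision but detects too many non-zero
# coefficients, by comparing the off-diagonal sparsity patterns.
offdiag_mask = ~np.eye(n_features, dtype=bool)
n_true_nonzero = np.sum(prec[offdiag_mask] != 0)
n_estimated_nonzero = np.sum(np.abs(prec_[offdiag_mask]) > 1e-8)
print("Off-diagonal non-zeros: ground truth %d, GraphLasso estimate %d"
      % (n_true_nonzero, n_estimated_nonzero))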
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores_, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
RodenLuo/LSolver | augment_nodule.py | 1 | 3169 | # coding: utf-8
import sys
import numpy as np # linear algebra
subset = sys.argv[1]
crop_window_len = np.int(sys.argv[2])
saving_mm_name = str(crop_window_len * 2 +1) + 'mm'
import cv2
from skimage import segmentation
from sklearn.cluster import DBSCAN
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import skimage, os
from skimage.morphology import ball, disk, dilation, binary_erosion, remove_small_objects, erosion, closing, reconstruction, binary_closing
from skimage.measure import label,regionprops, perimeter
from skimage.morphology import binary_dilation, binary_opening
from skimage.filters import roberts, sobel
from skimage import measure, feature
from skimage.segmentation import clear_border
# from skimage import data
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import dicom
import scipy.misc
import numpy as np
from skimage.segmentation import clear_border
from skimage.feature import peak_local_max
from scipy.ndimage.interpolation import rotate
import os
import numpy
import array
def save_nodule(nodule_crop, name_, path):
np.save(path + str(name_) + '.npy', nodule_crop)
import SimpleITK as sitk
import numpy as np
from glob import glob
import pandas as pd
import scipy.ndimage
## Read annotation data and filter those without images
# Learned from Jonathan Mulholland and Aaron Sander, Booz Allen Hamilton
# https://www.kaggle.com/c/data-science-bowl-2017#tutorial
# Predefine some parameters, this will affect final performance
low_cutoff = -650
jitter_number = 120
rotate_number = 360
num_per_direction = np.int(rotate_number / 6)
# Set input path
nodule_path = '/LUNA16/Train_data/' + saving_mm_name + '/' + subset + '/nodule/'
nodule_list = glob(nodule_path + "*.npy")
saving_path = '/LUNA16/Train_data/' + saving_mm_name + '/' + subset + '/augment_nodule/'
if not os.path.exists(saving_path):
os.makedirs(saving_path)
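# Augmentation scheme used below: for each nodule crop, rotated copies are
# generated for every ordered axis pair (ax_1, ax_2) with rotate_number/6
# random angles per pair, plus jitter_number copies perturbed by uniform
# per-voxel jitter in [-5, 5].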
def augment_nodule(nodule_npy_path):
nodule_crop = np.load(nodule_npy_path)
nodule_name = str(os.path.split(nodule_npy_path)[1]).replace('.npy', '')
i = 0
for ax_1 in range(3):
for ax_2 in range(3):
if not ax_2 == ax_1:
random_angle = np.random.uniform(0, 360, num_per_direction)
for angle in random_angle:
i+=1
nodule_crop_r = rotate(nodule_crop, angle, axes=(ax_1, ax_2), reshape=False, mode='reflect')
save_nodule(nodule_crop_r, nodule_name + '_rotate_' + str(i), saving_path)
for idx in range(jitter_number):
random_jitter = np.rint(np.random.uniform(-5, 5, [crop_window_len * 2 +1, crop_window_len * 2 +1, crop_window_len * 2 +1]))
nodule_crop_j = np.array(nodule_crop + random_jitter, dtype=np.int16)
save_nodule(nodule_crop_j, nodule_name + '_jitter_' + str(idx), saving_path)
from joblib import Parallel, delayed
import multiprocessing
num_cores = multiprocessing.cpu_count()
Parallel(n_jobs=num_cores)(delayed(augment_nodule)(nodule_path) for nodule_path in nodule_list)
print('Done for all')
| mit |
kadubarbosa/hydra1 | plot_lick_radius.py | 1 | 19507 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 12 10:24:35 2013
@author: cbarbosa
Program to produce plots of Lick indices in 1D, comparing with results from
Coccato et al. 2011
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.gridspec as gridspec
from scipy.optimize import curve_fit
from scipy.interpolate import LinearNDInterpolator as interpolator
from matplotlib.colors import Normalize
from scipy import ndimage
import brewer2mpl
from config import *
from mcmc_model import get_model_lims
import newcolorbars as nc
class Ssp:
""" Wrapper for the interpolated model."""
def __init__(self, model_table, indices=np.arange(25)):
self.interpolate(model_table)
self.indices = indices
def interpolate(self, model_table):
modeldata = np.loadtxt(model_table, dtype=np.double)
self.model = interpolator(modeldata[:,:3], modeldata[:,3:])
def fn(self, age, metallicity, alpha):
return self.model(age, metallicity, alpha)[self.indices]
def __call__(self, pars):
return self.fn(*pars)
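# Usage note (illustrative): after ssp = Ssp(model_table), calling
# ssp.fn(age, metallicity, alpha) or ssp((age, metallicity, alpha)) returns the
# interpolated Lick indices selected by ``indices`` for that SSP model.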
def get_model_range(table):
""" Get the range for the indices according to models. """
modeldata = np.loadtxt(table)
indices = modeldata[:,3:].T
ranges = np.zeros((len(indices), 2))
for i, index in enumerate(indices):
ranges[i] = [index.min(), index.max()]
return ranges
def line(x, zp, grad ):
return zp + grad * x
def mask_slits():
data = np.loadtxt("results.tab", dtype=str)
mask = ["inn1_s22", "inn1_s25", "inn1_s27", "out1_s19", "out1_s20",
"out1_s21", "out1_s22","out1_s23", "out1_s24", "out1_s25",
"out1_s26", "inn2_s39", "cen1_s14", "cen2_s15", "out2_s22",
"out2_s29", ]
# mask = ["inn1_s22", "inn1_s25", "inn1_s27", "out1_s19", "out1_s20",
# "out1_s21", "out1_s22","out1_s23", "out1_s24", "out1_s25",
# "out1_s26", "inn2_s39", "cen1_s14", "cen2_s15", "inn2_s34",
# "out1_s18", "cen1_s35", "cen2_s23", ]
mask = np.array(["fin1_n3311{0}.fits".format(x) for x in mask])
mask = data[~np.in1d(data[:,0], mask)]
np.savetxt("results_masked.tab", mask, fmt="%s")
return "results_masked.tab"
def movingrms(x, y, window_size=10):
a = np.column_stack((x,y))
a = a[np.argsort(a[:,0])]
window = np.ones(window_size)/float(window_size)
rms = np.sqrt(np.convolve(a[:,1], window, 'same'))
rms = ndimage.filters.gaussian_filter(rms, 2.5)
b = np.column_stack((a[:,0], rms))
b = b[~np.isnan(rms)]
return b
if __name__ == "__main__":
model_table = os.path.join(tables_dir, "models_thomas_2010.dat")
ssp = Ssp(model_table)
restrict_pa = 0
log = True
pc = 1
r_tran = np.log10( 8.4 / re)
plt.ioff()
model_table = os.path.join(tables_dir, "models_thomas_2010.dat")
ranges = get_model_range(model_table)
ii = [12,13,16,17,18,19]
ranges = np.array([ranges[i] for i in ii ])
os.chdir(os.path.join(home, "single2"))
indices = [r"H$\beta$ [$\AA$]", r"Fe5015 [$\AA$]", r"Mg $b$ [$\AA$]",
r"Fe5270 [$\AA$]",r"Fe5335 [$\AA$]",r"Fe5406 [$\AA$]",
r"Fe5709 [$\AA$]"]
lodo_table = os.path.join(tables_dir, "coccato2011_indices.tsv")
lodo = np.loadtxt(lodo_table, usecols=(1,3,5,7,9,11,13,15))
lodoerr = np.loadtxt(lodo_table, usecols=(1,4,6,8,10,12,14,16))
with open(lodo_table) as f:
header = f.readline() [:-1]
# Converting radius to effective units
lodo[:,0] /= (4.125 * re)
if log:
lodo[:,0] = np.log10(lodo[:,0])
#############################
# Applying offsets from paper
lodo[:,1] += 0.11
lodo[:,3] += 0.13
#############################
# Calculating composite indices for Lodo's data
fe5270 = lodo[:,3]
fe5270_e = lodoerr[:,3]
fe5335 = lodo[:,4]
fe5335_e = lodoerr[:,4]
mgb = lodo[:,2]
mgb_e = lodoerr[:,2]
meanfe = 0.5 * (fe5270 + fe5335)
meanfeerr = 0.5 * np.sqrt(fe5270_e**2 + fe5335_e**2)
term = (0.72 * fe5270 + 0.28 * fe5335)
mgfeprime = np.sqrt(mgb * term)
mgfeprimeerr = 0.5 * np.sqrt(term / mgb * (mgb_e**2) +
mgb / term * ((0.72 * fe5270_e)**2 + (0.28 * fe5335_e)**2))
lodo2 = np.column_stack((lodo[:,0], lodo[:,1], lodo[:,3], meanfe,
mgfeprime))
lodo2err = np.column_stack((lodo[:,0], lodoerr[:,1], lodoerr[:,3],
meanfeerr, mgfeprimeerr))
objs = np.loadtxt(lodo_table, dtype = str, usecols=(0,))
lododata = np.loadtxt(lodo_table, usecols=np.arange(1,17))
outtable = np.column_stack((lododata, meanfe, meanfeerr,
mgfeprime, mgfeprimeerr))
outtable = np.around(outtable, decimals=4)
outtable = np.column_stack((objs, outtable))
header += "\t<Fe>\terr\t[MgFe]'\terr\n"
with open(os.path.join(tables_dir, "coccato2011.dat"), "w") as f:
f.write(header)
np.savetxt(f, outtable, fmt="%s")
################################
dwarf = lodo[-1]
lodo = lodo[:-1]
dwarferr = lodoerr[-1]
lodoerr = lodoerr[:-1]
dwarf2 = lodo2[-1]
lodo2 = lodo2[:-1]
dwarf2err = lodo2err[-1]
lodo2err = lodo2err[:-1]
##########################################################################
# Central values according to Loubser+ 2009
loubser = np.array([1.581, 5.03, 4.608, 2.773, 2.473, 1.532, 0.876])
loubser_err = np.array([0.111, 0.228, 0.091, 0.088, 0.099, 0.072, 0.05])
##########################################################################
# Data from Loubser + 2012
loubser12 = np.loadtxt("/home/kadu/Dropbox/hydra1/loubser2012/"
"lick_loubser2012.txt",
usecols=(0,13,14,17,18,19,20,21))
loubser12[:,0] += np.log10(26.6/re) #Scaling to our effective radius
loubser12_errs = np.loadtxt("/home/kadu/Dropbox/hydra1/loubser2012/"
"lick_loubser2012_errs.txt",
usecols=(0,13,14,17,18,19,20,21))
##########################################################################
# Mask table
results_masked = mask_slits()
##########################################################################
# Read data
r, pa, sn, mu = np.loadtxt(results_masked, usecols=(3,4,14,82)).T
r /= re # Normalization to effective radius
if log:
r = np.log10(r)
lick = np.loadtxt(results_masked, usecols=(39,41,47,49,51,53,55))
lickerr = np.loadtxt(results_masked, usecols=(40,42,48,50,52,54,56))
if restrict_pa:
good_pa = np.logical_or(np.logical_and(pa > 48, pa < 78), r < r_tran)
r = r[good_pa]
lick = lick[good_pa]
lickerr = lickerr[good_pa]
sn = sn[good_pa]
r = r[sn > sn_cut]
lick = np.transpose(lick[sn > sn_cut])
lickerr = np.transpose(lickerr[sn > sn_cut])
#########################################################################
# Bin data for gradients
if log:
rbinnum, redges = np.histogram(r, bins=8, range=(r_tran,r.max()))
else:
rbinnum, redges = np.histogram(r, bins=8, range=(10**(r_tran),10**.8))
data_r = []
rbins = []
errs_r = []
lick_masked = np.ma.array(lick, mask=np.isnan(lick))
lickerrs_masked = np.ma.array(lickerr, mask=np.isnan(lick))
for i, bin in enumerate(rbinnum):
idx = np.logical_and(r >= redges[i], r < redges[i+1])
if not len(np.where(idx)[0]):
continue
median = True
if median:
m = np.ma.median(lick_masked[:,idx].T, axis=0) # median
data_r.append(m)
rbins.append(np.ma.median(r[idx], axis=0))
else:
data_r.append(np.ma.average(lick_masked[:,idx].T, axis=0,
weights=np.power(10, -0.4*mu[idx])))
rbins.append(np.ma.average(r[idx], axis=0,
weights=np.power(10, -0.4*mu[idx])))
sigma_mad = 1.4826 * np.ma.median(np.abs(lick_masked[:,idx].T - m),
axis=0)
sigma = np.ma.std(lick_masked[:,idx].T, axis=0)
errs_r.append(sigma_mad)
data_r = np.array(data_r)
rbins = np.array(rbins)
errs_r = np.array(errs_r)
#########################################################################
# Taking only inner region for gradients in NGC 3311
if log:
idx3311 = np.where(r <= r_tran)[0]
idxhalo = np.where(r > r_tran)[0]
else:
idx3311 = np.where(r <= 10**(r_tran))[0]
idxhalo = np.where(r > 10**(r_tran))[0]
r3311 = r[idx3311]
rhalo = r[idxhalo]
lick3311 = lick[:,idx3311]
lickhalo = lick[:,idxhalo]
errs1_3311 = lickerr[:,idx3311]
errs_halo = lickerr[:,idxhalo]
#########################################################################
# First figure, simple indices
app = "_pa" if restrict_pa else ""
mkfig1 = True
gray = "0.75"
##########################################################################
lims, ranges = get_model_lims(os.path.join(tables_dir,
"models_thomas_2010_metal_extrapolated.dat"))
idx = np.array([12,13,16,17,18,19,20])
lims = lims[idx]
# Setting the colormap properties for the scatter plots
cmap = brewer2mpl.get_map('Blues', 'sequential', 9).mpl_colormap
cmap = nc.cmap_discretize(cmap, 3)
color = cm.get_cmap(cmap)
norm = Normalize(vmin=0, vmax=45)
if mkfig1:
plt.figure(1, figsize = (6, 14 ))
gs = gridspec.GridSpec(7,1)
gs.update(left=0.15, right=0.95, bottom = 0.1, top=0.94, wspace=0.1,
hspace=0.09)
tex = []
for j, ll in enumerate(lick):
# print indices[j], ranges[j], ssp.fn(9.,0.12,.4)[ii[j]]
if j == 0:
labels = ["This work", "Coccato et al. 2011",
"This work (binned)"]
else:
labels = [None, None, None]
notnans = ~np.isnan(ll)
ax = plt.subplot(gs[j])
ydata = ll[notnans]
ax.errorbar(r[notnans], ydata, yerr=lickerr[j][notnans],
fmt=None, color=gray, ecolor=gray, capsize=0, mec=gray,
ms=5.5, alpha=1, markerfacecolor="none",
mew=2, elinewidth=1 )
ax.scatter(r[notnans], ydata, c=sn[notnans], s=60, cmap=cmap, zorder=2,
lw=0.5, norm=norm, edgecolor="k")
ax.plot(1000, 1000, "o", mew=0.8, mec="k", c=color(0),
label=r"S/N $< 15$")
ax.plot(1000, 1000, "o", mew=0.8, mec="k", c=color(0.5),
label=r"$15\leq$ S/N $\leq 30$")
ax.plot(1000, 1000, "o", mew=0.8, c=color(1.), mec="k",
label=r"S/N $> 30$")
ax.errorbar(loubser12[:,0], loubser12[:,j+1],
yerr=loubser12_errs[:,j+1], color="r", ecolor="r",
fmt="s", mec="k", capsize=0, lw=0.2,
label= "Loubser et al. 2012", alpha=1, ms=7.5, mew=0.5)
ax.errorbar(lodo[:,0], lodo[:,j+1],
yerr = lodoerr[:,j+1],
fmt="^", c="orange", capsize=0, mec="k", ecolor="0.5",
label=labels[1], ms=8., alpha=1, lw=0.5, mew=0.5)
ax.errorbar(dwarf[0],
dwarf[j+1], yerr=dwarferr[j+1], fmt="^", c="orange",
capsize=0, mec="k", ecolor="0.5", ms=8., lw=0.5, mew=0.5)
plt.minorticks_on()
if j+1 != len(lick):
ax.xaxis.set_ticklabels([])
else:
plt.xlabel(r"$\log$ R / R$_{\mbox{e}}$")
plt.ylabel(indices[j])
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
if j == 0:
leg = ax.legend(prop={'size':11}, loc=2, ncol=2, fontsize=14,
scatterpoints = 1, frameon=False)
add = 0 if j != 0 else 2
sigma_mad = 1.48 * np.median(np.abs(ydata - np.median(ydata)))
ym = np.ceil(np.median(ydata)-4 * sigma_mad)
yp = np.floor(np.median(ydata)+4*sigma_mad+add)
ylim = plt.ylim(ym, yp)
##################################################################
# Measuring gradients
##################################################################
# NGC 3311
l = lick3311[j]
lerr = errs1_3311[j]
mask = ~np.isnan(l)
popt, pcov = curve_fit(line, r3311[mask], l[mask], sigma=lerr[mask])
pcov = np.sqrt(np.diagonal(pcov))
x = np.linspace(r.min(), r_tran, 100)
if not log:
x = 10**x
y = line(x, popt[0], popt[1])
lll, = ax.plot(x, y, "--k", lw=2, zorder=10000)
lll.set_dashes([10, 3])
            # Including shades for +- 1%
##################################################################
# Halo
values = lickhalo[j]
for k,v in enumerate(values):
if v <= lims[j][0] or v >= lims[j][1]:
values[k] = np.nan
mask = ~np.isnan(values)
l = lickhalo[j][mask]
lerr = errs_halo[j][mask]
popth, pcovh = curve_fit(line, rhalo[mask], l, sigma=lerr)
pcovh = np.sqrt(np.diagonal(pcovh))
x = np.linspace(r_tran, 0.7, 100)
if not log:
x = 10**x
y = line(x, popth[0], popth[1])
lll, = ax.plot(x, y, "-k", lw=1.5, zorder=10000)
lll.set_dashes([10, 3])
#################################################################
# Ploting rms 1%
for p, c in [[1,"0.3"]]:
tab = os.path.join(tables_dir,
"rms_{1}pc_lick_{0}.txt".format(j, p))
print tab
rms = np.loadtxt(os.path.join(tables_dir,
"rms_{1}pc_lick_{0}.txt".format(j, p)),
usecols=(0,1))
xrms, yrms = rms[rms[:,0] < r_tran].T
# ax.plot(xrms, yrms + line(xrms, popt[0], popt[1]), "-", c="0.5")
# ax.plot(xrms, -yrms + line(xrms, popt[0], popt[1]), "-", c="0.5")
ax.fill_between(xrms, yrms + line(xrms, popt[0], popt[1]),
line(xrms, popt[0], popt[1]) - yrms,
edgecolor="none", color=gray,
linewidth=0, alpha=1)
##################################################################
# Outer halo in bins
# popt2, pcov2 = curve_fit(line, rbins, data_r[:,j], sigma=errs_r[:,j])
# pcov2 = np.sqrt(np.diagonal(pcov2))
# x = np.linspace(r_tran, r.max(), 100)
# if not log:
# x = 10**x
# y = line(x, popt2[0], popt2[1])
# ax.plot(x, y, "--k", lw=2)
# ax.axvline(x=r_tran, c="k", ls="-.")
##################################################################
# Ploting rms 1%
# for p, c in [[1,"0.1"], [6, "0.8"]]:
for p, c in [[1,"0.1"]]:
rms = np.loadtxt(os.path.join(tables_dir,
"rms_{1}pc_lick_{0}.txt".format(j, p)),
usecols=(0,1))
xrms, yrms = rms[rms[:,0]>=r_tran].T
ax.fill_between(xrms, yrms + line(xrms, popth[0], popth[1]),
line(xrms, popth[0], popth[1]) - yrms,
edgecolor="none", color=gray,
linewidth=0, alpha=1)
##################################################################
# Draw arrows to indicate central limits
ax.annotate("", xy=(-1.12, loubser[j]), xycoords='data',
xytext=(-1.3, loubser[j]), textcoords='data',
arrowprops=dict(arrowstyle="<-", connectionstyle="arc3", ec="r",
lw=2))
##################################################################
ax.set_xlim(-1.35, 1.)
# ##################################################################
tex.append(r"{0} & {1[0]:.1f}$\pm${2[0]:.1f} & {1[1]:.1f}$\pm${2[1]:.1f}" \
r" & {3[0]:.1f}$\pm${4[0]:.1f} & {3[1]:.1f}$\pm${4[1]:.1f}""\\\\".format(
indices[j][:-7], popt, pcov, popth, pcovh))
print indices[j][:-7],
for m in [1,2,3]:
print np.abs(popt[1] - popth[1]) < m * (pcov[1]+pcovh[1]),
print
print "Saving new figure..."
plt.savefig("figs/lick_radius.png".format(pc), dpi=300,
bbox_inches="tight", transparent=False)
for t in tex:
print t
# plt.show(block=1)
# Making plots of Hbeta, Mgb, <Fe> and [MgFe]'
# r, pa, sn = np.loadtxt(results_masked, usecols=(3,4,14)).T
# # r /= re
# lick = np.loadtxt(results_masked, usecols=(39, 67, 80))
# lickerr = np.loadtxt(results_masked, usecols=(40, 68, 81))
# if restrict_pa:
# good_pa = np.logical_and(pa > 0, pa < 270)
# r = r[good_pa]
# lick = lick[good_pa]
# lickerr = lickerr[good_pa]
# sn = sn[good_pa]
# r = r[sn > sn_cut]
# lick = np.transpose(lick[sn > sn_cut])
# lickerr = np.transpose(lickerr[sn > sn_cut])
# gs2 = gridspec.GridSpec(len(lick),3)
# gs2.update(left=0.15, right=0.95, bottom = 0.1, top=0.94, hspace = 0.10,
# wspace=0.04)
# plt.figure(2, figsize = (6, 7))
# indices = [r"H$\beta$ [$\AA$]", r"[MgFe]'",
# r"$\mbox{Mg }b/\langle\mbox{Fe}\rangle$"]
#
# for j, (ll,lerr) in enumerate(zip(lick, lickerr)):
# ax = plt.subplot(gs2[j, 0:2], xscale="log")
# notnans = ~np.isnan(ll)
# ax.errorbar(r[notnans], ll[notnans], yerr=lerr[notnans],
# fmt="d", color="r",
# ecolor=gray, capsize=0, mec="k", markerfacecolor="none")
# # plt.errorbar(lodo2[:,0], lodo2[:,j+1],
# # yerr=lodo2err[:,j+1], fmt="+", c="b", capsize=0,
# # mec="b", ecolor="0.5", label=None, ms=10)
# # plt.errorbar(dwarf2[0],
# # dwarf2[j+1], yerr=dwarf2err[j+1], fmt="o",
# # c="w", capsize=0, mec="b", ecolor="0.5")
# plt.minorticks_on()
# if j != len(lick) -1 :
# ax.xaxis.set_ticklabels([])
# else:
# plt.xlabel(r"R (kpc)")
# ax.set_xticklabels(["0.1", "1", "10"])
# plt.ylabel(indices[j], fontsize=10)
# ax.yaxis.set_major_locator(plt.MaxNLocator(5))
# plt.ylim(ylims[j])
# # Histograms
# ax = plt.subplot(gs2[j, 2])
# plt.minorticks_on()
# ax.hist(ll[notnans], orientation="horizontal", color="r",
# ec="k")
# ax.yaxis.set_major_locator(plt.MaxNLocator(5))
# ax.xaxis.set_ticklabels([])
# ax.yaxis.set_ticklabels([])
# plt.ylim(ylims[j])
# plt.savefig("figs/lick_radius_combined.pdf",
# bbox_inches="tight", transparent=False) | gpl-2.0 |
MortalViews/incubator-airflow | tests/operators/hive_operator.py | 40 | 14061 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import datetime
import os
import unittest
import mock
import nose
import six
from airflow import DAG, configuration, operators
configuration.load_test_config()
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
if 'AIRFLOW_RUNALL_TESTS' in os.environ:
import airflow.hooks.hive_hooks
import airflow.operators.presto_to_mysql
class HiveServer2Test(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
self.nondefault_schema = "nondefault"
def test_select_conn(self):
from airflow.hooks.hive_hooks import HiveServer2Hook
sql = "select 1"
hook = HiveServer2Hook()
hook.get_records(sql)
def test_multi_statements(self):
from airflow.hooks.hive_hooks import HiveServer2Hook
sqls = [
"CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)",
"DROP TABLE test_multi_statements",
]
hook = HiveServer2Hook()
hook.get_records(sqls)
def test_get_metastore_databases(self):
if six.PY2:
from airflow.hooks.hive_hooks import HiveMetastoreHook
hook = HiveMetastoreHook()
hook.get_databases()
def test_to_csv(self):
from airflow.hooks.hive_hooks import HiveServer2Hook
sql = "select 1"
hook = HiveServer2Hook()
hook.to_csv(hql=sql, csv_filepath="/tmp/test_to_csv")
def connect_mock(self, host, port,
auth_mechanism, kerberos_service_name,
user, database):
self.assertEqual(database, self.nondefault_schema)
@mock.patch('HiveServer2Hook.connect', return_value="foo")
def test_select_conn_with_schema(self, connect_mock):
from airflow.hooks.hive_hooks import HiveServer2Hook
# Configure
hook = HiveServer2Hook()
# Run
hook.get_conn(self.nondefault_schema)
# Verify
self.assertTrue(connect_mock.called)
(args, kwargs) = connect_mock.call_args_list[0]
self.assertEqual(self.nondefault_schema, kwargs['database'])
def test_get_results_with_schema(self):
from airflow.hooks.hive_hooks import HiveServer2Hook
from unittest.mock import MagicMock
# Configure
sql = "select 1"
            schema = self.nondefault_schema
hook = HiveServer2Hook()
            # Both mocks act as context managers that return themselves.
            cursor_mock = MagicMock()
            cursor_mock.__enter__.return_value = cursor_mock
            cursor_mock.execute.return_value = None
            cursor_mock.fetchall.return_value = []
            get_conn_mock = MagicMock()
            get_conn_mock.return_value = get_conn_mock
            get_conn_mock.__enter__.return_value = get_conn_mock
            get_conn_mock.cursor.return_value = cursor_mock
hook.get_conn = get_conn_mock
# Run
hook.get_results(sql, schema)
# Verify
get_conn_mock.assert_called_with(self.nondefault_schema)
@mock.patch('HiveServer2Hook.get_results', return_value={'data': []})
def test_get_records_with_schema(self, get_results_mock):
from airflow.hooks.hive_hooks import HiveServer2Hook
# Configure
sql = "select 1"
hook = HiveServer2Hook()
# Run
hook.get_records(sql, self.nondefault_schema)
# Verify
self.assertTrue(self.connect_mock.called)
(args, kwargs) = self.connect_mock.call_args_list[0]
self.assertEqual(sql, args[0])
self.assertEqual(self.nondefault_schema, kwargs['schema'])
@mock.patch('HiveServer2Hook.get_results', return_value={'data': []})
def test_get_pandas_df_with_schema(self, get_results_mock):
from airflow.hooks.hive_hooks import HiveServer2Hook
# Configure
sql = "select 1"
hook = HiveServer2Hook()
# Run
hook.get_pandas_df(sql, self.nondefault_schema)
# Verify
self.assertTrue(self.connect_mock.called)
(args, kwargs) = self.connect_mock.call_args_list[0]
self.assertEqual(sql, args[0])
self.assertEqual(self.nondefault_schema, kwargs['schema'])
class HivePrestoTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG('test_dag_id', default_args=args)
self.dag = dag
self.hql = """
USE airflow;
DROP TABLE IF EXISTS static_babynames_partitioned;
CREATE TABLE IF NOT EXISTS static_babynames_partitioned (
state string,
year string,
name string,
gender string,
num int)
PARTITIONED BY (ds string);
INSERT OVERWRITE TABLE static_babynames_partitioned
PARTITION(ds='{{ ds }}')
SELECT state, year, name, gender, num FROM static_babynames;
"""
def test_hive(self):
import airflow.operators.hive_operator
t = operators.hive_operator.HiveOperator(
task_id='basic_hql', hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_queues(self):
import airflow.operators.hive_operator
t = operators.hive_operator.HiveOperator(
task_id='test_hive_queues', hql=self.hql,
mapred_queue='default', mapred_queue_priority='HIGH',
mapred_job_name='airflow.test_hive_queues',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_dryrun(self):
import airflow.operators.hive_operator
t = operators.hive_operator.HiveOperator(
task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag)
t.dry_run()
def test_beeline(self):
import airflow.operators.hive_operator
t = operators.hive_operator.HiveOperator(
task_id='beeline_hql', hive_cli_conn_id='beeline_default',
hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_presto(self):
sql = """
SELECT count(1) FROM airflow.static_babynames_partitioned;
"""
import airflow.operators.presto_check_operator
t = operators.presto_check_operator.PrestoCheckOperator(
task_id='presto_check', sql=sql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_presto_to_mysql(self):
import airflow.operators.presto_to_mysql
t = operators.presto_to_mysql.PrestoToMySqlTransfer(
task_id='presto_to_mysql_check',
sql="""
SELECT name, count(*) as ccount
FROM airflow.static_babynames
GROUP BY name
""",
mysql_table='test_static_babynames',
mysql_preoperator='TRUNCATE TABLE test_static_babynames;',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hdfs_sensor(self):
t = operators.sensors.HdfsSensor(
task_id='hdfs_sensor_check',
filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_webhdfs_sensor(self):
t = operators.sensors.WebHdfsSensor(
task_id='webhdfs_sensor_check',
filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
timeout=120,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_sql_sensor(self):
t = operators.sensors.SqlSensor(
task_id='hdfs_sensor_check',
conn_id='presto_default',
sql="SELECT 'x' FROM airflow.static_babynames LIMIT 1;",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_stats(self):
import airflow.operators.hive_stats_operator
t = operators.hive_stats_operator.HiveStatsCollectionOperator(
task_id='hive_stats_check',
table="airflow.static_babynames_partitioned",
partition={'ds': DEFAULT_DATE_DS},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor(self):
t = operators.sensors.NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}"
],
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self):
t = operators.sensors.NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}",
"airflow.static_babynames_partitioned/ds={{ds}}"
],
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor_parses_partitions_with_periods(self):
t = operators.sensors.NamedHivePartitionSensor.parse_partition_name(
partition="schema.table/part1=this.can.be.an.issue/part2=ok")
self.assertEqual(t[0], "schema")
self.assertEqual(t[1], "table")
            self.assertEqual(t[2], "part1=this.can.be.an.issue/part2=ok")
@nose.tools.raises(airflow.exceptions.AirflowSensorTimeout)
def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self):
t = operators.sensors.NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}",
"airflow.static_babynames_partitioned/ds=nonexistent"
],
poke_interval=0.1,
timeout=1,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_partition_sensor(self):
t = operators.sensors.HivePartitionSensor(
task_id='hive_partition_check',
table='airflow.static_babynames_partitioned',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_metastore_sql_sensor(self):
t = operators.sensors.MetastorePartitionSensor(
task_id='hive_partition_check',
table='airflow.static_babynames_partitioned',
partition_name='ds={}'.format(DEFAULT_DATE_DS),
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive2samba(self):
import airflow.operators.hive_to_samba_operator
t = operators.hive_to_samba_operator.Hive2SambaOperator(
task_id='hive2samba_check',
samba_conn_id='tableau_samba',
hql="SELECT * FROM airflow.static_babynames LIMIT 10000",
destination_filepath='test_airflow.csv',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_to_mysql(self):
import airflow.operators.hive_to_mysql
t = operators.hive_to_mysql.HiveToMySqlTransfer(
mysql_conn_id='airflow_db',
task_id='hive_to_mysql_check',
create=True,
sql="""
SELECT name
FROM airflow.static_babynames
LIMIT 100
""",
mysql_table='test_static_babynames',
mysql_preoperator=[
'DROP TABLE IF EXISTS test_static_babynames;',
'CREATE TABLE test_static_babynames (name VARCHAR(500))',
],
dag=self.dag)
t.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
| apache-2.0 |
elijah513/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 176 | 2169 | from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
    # The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
    # The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
| bsd-3-clause |
probml/pyprobml | scripts/beta_binom_post_pred_plot.py | 1 | 1248 |
# Plots the posterior and plugin predictives for the Beta-Binomial distribution.
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = os.path.join(os.environ["PYPROBML"], "figures")
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
from scipy.special import comb, beta
from scipy.stats import binom
N = 10 # Future sample size M
# Hyperparameters
a = 1
b = 1
N1 = 4
N0 = 1
ind = np.arange(N+1)
post_a = a + N1
post_b = b + N0
# Compound beta-binomial distribution
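# The loop below evaluates the beta-binomial posterior predictive pmf
#   p(k | D) = C(N, k) * B(k + a', N - k + b') / B(a', b')
# where B is the Beta function, a' = a + N1 and b' = b + N0.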
distribution = []
for k in range(N+1):
distribution.append(comb(N,k) * beta(k+post_a, N-k+post_b) / beta(post_a, post_b))
fig,ax = plt.subplots()
rects = ax.bar(ind, distribution, align='center')
ax.set_title('posterior predictive')
ax.set_xticks(list(range(N+1)))
ax.set_xticklabels(list(range(N+1)))
save_fig('BBpostpred.pdf')
plt.show()
# Plugin binomial distribution
mu = (post_a - 1) / float(post_a + post_b - 2) # MAP estimate
distribution = []
rv = binom(N, mu)
for k in range(N+1):
distribution.append(rv.pmf(k))
fig,ax = plt.subplots()
rects = ax.bar(ind, distribution, align='center')
ax.set_title('plugin predictive')
ax.set_xticks(list(range(N+1)))
ax.set_xticklabels(list(range(N+1)))
save_fig('BBpluginpred.pdf')
plt.show()
| mit |
liangz0707/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
oesteban/mriqc | mriqc/reports/group.py | 1 | 7630 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# pylint: disable=no-member
#
# @Author: oesteban
# @Date: 2016-01-05 11:33:39
# @Email: [email protected]
# @Last modified by: oesteban
""" Encapsulates report generation functions """
from sys import version_info
import pandas as pd
from .. import logging
from ..utils.misc import BIDS_COMP
from builtins import object # pylint: disable=W0622
from io import open
MRIQC_REPORT_LOG = logging.getLogger('mriqc.report')
def gen_html(csv_file, mod, csv_failed=None, out_file=None):
import os.path as op
import datetime
from pkg_resources import resource_filename as pkgrf
from .. import __version__ as ver
from ..data import GroupTemplate
if version_info[0] > 2:
from io import StringIO as TextIO
else:
from io import BytesIO as TextIO
QCGROUPS = {
'T1w': [
(['cjv'], None),
(['cnr'], None),
(['efc'], None),
(['fber'], None),
(['wm2max'], None),
(['snr_csf', 'snr_gm', 'snr_wm'], None),
(['snrd_csf', 'snrd_gm', 'snrd_wm'], None),
(['fwhm_avg', 'fwhm_x', 'fwhm_y', 'fwhm_z'], 'vox'),
(['qi_1', 'qi_2'], None),
(['inu_range', 'inu_med'], None),
(['icvs_csf', 'icvs_gm', 'icvs_wm'], None),
(['rpve_csf', 'rpve_gm', 'rpve_wm'], None),
(['tpm_overlap_csf', 'tpm_overlap_gm', 'tpm_overlap_wm'], None),
(['summary_bg_mean', 'summary_bg_median', 'summary_bg_stdv', 'summary_bg_mad',
'summary_bg_k', 'summary_bg_p05', 'summary_bg_p95'], None),
(['summary_csf_mean', 'summary_csf_median', 'summary_csf_stdv', 'summary_csf_mad',
'summary_csf_k', 'summary_csf_p05', 'summary_csf_p95'], None),
(['summary_gm_mean', 'summary_gm_median', 'summary_gm_stdv', 'summary_gm_mad',
'summary_gm_k', 'summary_gm_p05', 'summary_gm_p95'], None),
(['summary_wm_mean', 'summary_wm_median', 'summary_wm_stdv', 'summary_wm_mad',
'summary_wm_k', 'summary_wm_p05', 'summary_wm_p95'], None)
],
'T2w': [
(['cjv'], None),
(['cnr'], None),
(['efc'], None),
(['fber'], None),
(['wm2max'], None),
(['snr_csf', 'snr_gm', 'snr_wm'], None),
(['snrd_csf', 'snrd_gm', 'snrd_wm'], None),
(['fwhm_avg', 'fwhm_x', 'fwhm_y', 'fwhm_z'], 'mm'),
(['qi_1', 'qi_2'], None),
(['inu_range', 'inu_med'], None),
(['icvs_csf', 'icvs_gm', 'icvs_wm'], None),
(['rpve_csf', 'rpve_gm', 'rpve_wm'], None),
(['tpm_overlap_csf', 'tpm_overlap_gm', 'tpm_overlap_wm'], None),
(['summary_bg_mean', 'summary_bg_stdv', 'summary_bg_k',
'summary_bg_p05', 'summary_bg_p95'], None),
(['summary_csf_mean', 'summary_csf_stdv', 'summary_csf_k',
'summary_csf_p05', 'summary_csf_p95'], None),
(['summary_gm_mean', 'summary_gm_stdv', 'summary_gm_k',
'summary_gm_p05', 'summary_gm_p95'], None),
(['summary_wm_mean', 'summary_wm_stdv', 'summary_wm_k',
'summary_wm_p05', 'summary_wm_p95'], None)
],
'bold': [
(['efc'], None),
(['fber'], None),
(['fwhm', 'fwhm_x', 'fwhm_y', 'fwhm_z'], 'mm'),
(['gsr_%s' % a for a in ['x', 'y']], None),
(['snr'], None),
(['dvars_std', 'dvars_vstd'], None),
(['dvars_nstd'], None),
(['fd_mean'], 'mm'),
(['fd_num'], '# timepoints'),
(['fd_perc'], '% timepoints'),
(['spikes_num'], '# slices'),
(['dummy_trs'], '# TRs'),
(['gcor'], None),
(['tsnr'], None),
(['aor'], None),
(['aqi'], None),
(['summary_bg_mean', 'summary_bg_stdv', 'summary_bg_k',
'summary_bg_p05', 'summary_bg_p95'], None),
(['summary_fg_mean', 'summary_fg_stdv', 'summary_fg_k',
'summary_fg_p05', 'summary_fg_p95'], None),
]
}
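    # QCGROUPS maps each modality to a list of (IQM name group, units) tuples;
    # every group becomes one CSV string below and one plotting group in the
    # rendered HTML report.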
if csv_file.suffix == '.csv':
def_comps = list(BIDS_COMP.keys())
dataframe = pd.read_csv(csv_file, index_col=False,
dtype={comp: object for comp in def_comps})
id_labels = list(set(def_comps) & set(dataframe.columns.ravel().tolist()))
dataframe['label'] = dataframe[id_labels].apply(_format_labels, args=(id_labels,),
axis=1)
else:
dataframe = pd.read_csv(csv_file, index_col=False, sep='\t',
dtype={'bids_name': object})
dataframe = dataframe.rename(index=str, columns={'bids_name': 'label'})
nPart = len(dataframe)
failed = None
if csv_failed is not None and op.isfile(csv_failed):
MRIQC_REPORT_LOG.warning('Found failed-workflows table "%s"', csv_failed)
failed_df = pd.read_csv(csv_failed, index_col=False)
cols = list(set(id_labels) & set(failed_df.columns.ravel().tolist()))
try:
failed_df = failed_df.sort_values(by=cols)
except AttributeError:
failed_df = failed_df.sort(columns=cols)
# myfmt not defined
# failed = failed_df[cols].apply(myfmt, args=(cols,), axis=1).ravel().tolist()
csv_groups = []
datacols = dataframe.columns.ravel().tolist()
for group, units in QCGROUPS[mod]:
dfdict = {'iqm': [], 'value': [], 'label': [], 'units': []}
for iqm in group:
if iqm in datacols:
values = dataframe[[iqm]].values.ravel().tolist()
if values:
dfdict['iqm'] += [iqm] * nPart
dfdict['units'] += [units] * nPart
dfdict['value'] += values
dfdict['label'] += dataframe[['label']].values.ravel().tolist()
# Save only if there are values
if dfdict['value']:
csv_df = pd.DataFrame(dfdict)
csv_str = TextIO()
csv_df[['iqm', 'value', 'label', 'units']].to_csv(csv_str, index=False)
csv_groups.append(csv_str.getvalue())
if out_file is None:
out_file = op.abspath('group.html')
tpl = GroupTemplate()
tpl.generate_conf({
'modality': mod,
'timestamp': datetime.datetime.now().strftime("%Y-%m-%d, %H:%M"),
'version': ver,
'csv_groups': csv_groups,
'failed': failed,
'boxplots_js': open(pkgrf('mriqc', op.join('data', 'reports',
'embed_resources',
'boxplots.js'))).read(),
'd3_js': open(pkgrf('mriqc', op.join('data', 'reports',
'embed_resources',
'd3.min.js'))).read(),
'boxplots_css': open(pkgrf('mriqc', op.join('data', 'reports',
'embed_resources',
'boxplots.css'))).read()
}, out_file)
return out_file
def _format_labels(row, id_labels):
"""format participant labels"""
crow = []
for col_id, prefix in list(BIDS_COMP.items()):
if col_id in id_labels:
crow.append('%s-%s' % (prefix, row[[col_id]].values[0]))
return '_'.join(crow)
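# For illustration: assuming BIDS_COMP maps 'subject_id' to the prefix 'sub',
# a row with subject_id='01' contributes 'sub-01', and multiple components are
# joined with underscores (e.g. 'sub-01_ses-post').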
| bsd-3-clause |
peterfpeterson/mantid | qt/applications/workbench/workbench/widgets/plotselector/presenter.py | 3 | 15280 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
import os
import re
from .model import PlotSelectorModel
from .view import PlotSelectorView, Column
class PlotSelectorPresenter(object):
"""
Presenter for the plot selector widget. This class can be
responsible for the creation of the model and view, passing in
the GlobalFigureManager as an argument, or the presenter and view
can be passed as arguments (only intended for testing).
"""
def __init__(self, global_figure_manager, view=None, model=None):
"""
Initialise the presenter, creating the view and model, and
setting the initial plot list
:param global_figure_manager: The GlobalFigureManager class
:param view: Optional - a view to use instead of letting the
class create one (intended for testing)
:param model: Optional - a model to use instead of letting
the class create one (intended for testing)
"""
# Create model and view, or accept mocked versions
if view is None:
self.view = PlotSelectorView(self)
else:
self.view = view
if model is None:
self.model = PlotSelectorModel(self, global_figure_manager)
else:
self.model = model
# Make sure the plot list is up to date
self.update_plot_list()
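    # Note (illustrative): as the docstring above states, tests can inject
    # doubles directly, e.g. PlotSelectorPresenter(global_figure_manager,
    # view=mock_view, model=mock_model), bypassing the real Qt view; the names
    # mock_view and mock_model are placeholders, not part of this module.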
def get_plot_name_from_number(self, plot_number):
return self.model.get_plot_name_from_number(plot_number)
# ------------------------ Plot Updates ------------------------
def update_plot_list(self):
"""
Updates the plot list in the model and the view. Filter text
is applied to the updated selection if required.
"""
plot_list = self.model.get_plot_list()
self.view.set_plot_list(plot_list)
def append_to_plot_list(self, plot_number):
"""
Appends the plot name to the end of the plot list
:param plot_number: The unique number in GlobalFigureManager
"""
self.view.append_to_plot_list(plot_number)
self.view.set_visibility_icon(plot_number, self.model.is_visible(plot_number))
def remove_from_plot_list(self, plot_number):
"""
Removes the plot name from the plot list
:param plot_number: The unique number in GlobalFigureManager
"""
self.view.remove_from_plot_list(plot_number)
def rename_in_plot_list(self, plot_number, new_name):
"""
Replaces a name in the plot list
:param plot_number: The unique number in GlobalFigureManager
:param new_name: The new name for the plot
"""
self.view.rename_in_plot_list(plot_number, new_name)
# ----------------------- Plot Filtering ------------------------
def filter_text_changed(self):
"""
Called by the view when the filter text is changed (e.g. by
typing or clearing the text)
"""
if self.view.get_filter_text():
self.view.filter_plot_list()
else:
self.view.unhide_all_plots()
def is_shown_by_filter(self, plot_number):
"""
:param plot_number: The unique number in GlobalFigureManager
:return: True if shown, or False if filtered out
"""
filter_text = self.view.get_filter_text()
plot_name = self.get_plot_name_from_number(plot_number)
return filter_text.lower() in plot_name.lower()
# ------------------------ Plot Showing ------------------------
def show_single_selected(self):
"""
When a list item is double clicked the view calls this method
to bring the selected plot to the front
"""
plot_number = self.view.get_currently_selected_plot_number()
self._make_plot_active(plot_number)
def show_multiple_selected(self):
"""
Shows multiple selected plots, e.g. from pressing the 'Show'
button with multiple selected plots
"""
selected_plots = self.view.get_all_selected_plot_numbers()
for plot_number in selected_plots:
self._make_plot_active(plot_number)
def _make_plot_active(self, plot_number):
"""
Make the plot with the given name active - bring it to the
front and make it the choice for overplotting
:param plot_number: The unique number in GlobalFigureManager
"""
try:
self.model.show_plot(plot_number)
except ValueError as e:
print(e)
def set_active_font(self, plot_number):
"""
Set the icon for the active plot to be colored
:param plot_number: The unique number in GlobalFigureManager
"""
active_plot_number = self.view.active_plot_number
if active_plot_number > 0:
try:
self.view.set_active_font(active_plot_number, False)
except ValueError:
# The last active plot could have been closed
# already, so there is nothing to do
pass
self.view.set_active_font(plot_number, True)
self.view.active_plot_number = plot_number
# ------------------------ Plot Hiding -------------------------
def hide_selected_plots(self):
"""
Hide all plots that are selected in the view
"""
selected_plots = self.view.get_all_selected_plot_numbers()
for plot_number in selected_plots:
self._hide_plot(plot_number)
def _hide_plot(self, plot_number):
"""
Hides a single plot
"""
try:
self.model.hide_plot(plot_number)
except ValueError as e:
print(e)
def toggle_plot_visibility(self, plot_number):
"""
Toggles a plot between hidden and shown
:param plot_number: The unique number in GlobalFigureManager
"""
if self.model.is_visible(plot_number):
self._hide_plot(plot_number)
else:
self._make_plot_active(plot_number)
self.update_visibility_icon(plot_number)
def update_visibility_icon(self, plot_number):
"""
Updates the icon to indicate a plot as hidden or visible
:param plot_number: The unique number in GlobalFigureManager
"""
try:
is_visible = self.model.is_visible(plot_number)
self.view.set_visibility_icon(plot_number, is_visible)
except ValueError:
# There is a chance the plot was closed, which calls an
# update to this method. If we can not get the visibility
# status it is safe to assume the plot has been closed.
pass
# ------------------------ Plot Renaming ------------------------
def rename_figure(self, plot_number, new_name):
"""
Replaces a name in the plot list
:param plot_number: The unique number in GlobalFigureManager
:param new_name: The new plot name
"""
try:
self.model.rename_figure(plot_number, new_name)
except ValueError as e:
# We need to undo the rename in the view
self.view.rename_in_plot_list(plot_number, new_name)
print(e)
# ------------------------ Plot Closing -------------------------
def close_action_called(self):
"""
This is called by the view when closing plots is requested
(e.g. pressing close or delete).
"""
selected_plots = self.view.get_all_selected_plot_numbers()
self._close_plots(selected_plots)
def close_single_plot(self, plot_number):
"""
This is used to close plots when a close action is called
that does not refer to the selected plot(s)
:param plot_number: The unique number in GlobalFigureManager
"""
self._close_plots([plot_number])
def _close_plots(self, list_of_plot_numbers):
"""
Accepts a list of plot numbers to close
:param list_of_plot_numbers: A list of the plot numbers to close
"""
for plot_number in list_of_plot_numbers:
try:
self.model.close_plot(plot_number)
except ValueError as e:
print(e)
# ----------------------- Plot Sorting --------------------------
def set_sort_order(self, is_ascending):
"""
Sets the sort order in the view
:param is_ascending: If true ascending order, else descending
"""
self.view.set_sort_order(is_ascending)
def set_sort_type(self, sort_type):
"""
Sets the sort order in the view
:param sort_type: A Column enum with the column to sort on
"""
self.view.set_sort_type(sort_type)
self.update_last_active_order()
def update_last_active_order(self):
"""
Update the sort keys in the view. This is only required when
the last shown order changes in the model; when a plot is
renamed the key is already set
"""
if self.view.sort_type() == Column.LastActive:
self._set_last_active_order()
def _set_last_active_order(self):
"""
Set the last shown order in the view. This checks the sorting
currently set and then sets the sort keys to the appropriate
values
"""
last_active_values = self.model.last_active_values()
self.view.set_last_active_values(last_active_values)
def get_initial_last_active_value(self, plot_number):
"""
Gets the initial last active value for a plot that has just
been added; in this case it is assumed not to have been shown
:param plot_number: The unique number in GlobalFigureManager
:return: A string with the last active value
"""
return '_' + self.model.get_plot_name_from_number(plot_number)
def get_renamed_last_active_value(self, plot_number, old_last_active_value):
"""
Gets the last active value for a plot that was renamed. If the
plot had a numeric value (i.e. it has been shown) this is
retained, otherwise a new initial value is generated
:param plot_number: The unique number in GlobalFigureManager
:param old_last_active_value: The previous last active value
"""
if old_last_active_value.isdigit():
return old_last_active_value
else:
return self.get_initial_last_active_value(plot_number)
# ---------------------- Plot Exporting -------------------------
def export_plots_called(self, extension):
"""
Called from the view to export plots; a single plot or multiple
plots are exported depending on the number currently selected
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
plot_numbers = self.view.get_all_selected_plot_numbers()
if len(plot_numbers) == 1:
self._export_single_plot(plot_numbers[0], extension)
elif len(plot_numbers) > 1:
self._export_multiple_plots(plot_numbers, extension)
def _export_single_plot(self, plot_number, extension):
"""
Called when a single plot is selected to export - prompts for
a filename then tries to save the plot
:param plot_number: The unique number in GlobalFigureManager
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
absolute_path = self.view.get_file_name_for_saving(extension)
if not absolute_path.endswith(extension):
absolute_path += extension
try:
self.model.export_plot(plot_number, absolute_path)
except ValueError as e:
print(e)
def _export_multiple_plots(self, plot_numbers, extension):
"""
Export all selected plots in the plot_numbers list, first
prompting for a save directory then sanitising plot names to
unique, usable file names
:param plot_numbers: A list of plot numbers to export
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
dir_name = self.view.get_directory_name_for_saving()
# A temporary dictionary holding plot numbers as keys, plot
# names as values
plots = {}
for plot_number in plot_numbers:
plot_name = self.model.get_plot_name_from_number(plot_number)
plot_name = self._replace_special_characters(plot_name)
if plot_name in plots.values():
plot_name = self._make_unique_name(plot_name, plots)
plots[plot_number] = plot_name
self._export_plot(plot_number, plot_name, dir_name, extension)
def _replace_special_characters(self, string):
"""
Replaces any characters that are not valid in file names on any
of the major operating systems ('/' on Linux/macOS; several
more on Windows)
:param string: The string to replace characters in
:return: The string with special characters replaced by '-'
"""
return re.sub(r'[<>:"/|\\?*]', r'-', string)
def _make_unique_name(self, name, dictionary):
"""
Given a name and a dictionary, make a unique name that does
not already exist in the dictionary values by appending
' (1)', ' (2)', ' (3)' etc. to the end of the name
:param name: A string with the non-unique name
:param dictionary: A dictionary with string values
:return : The unique plot name
"""
i = 1
while True:
plot_name_attempt = name + ' ({})'.format(str(i))
if plot_name_attempt not in dictionary.values():
break
i += 1
return plot_name_attempt
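# Illustrative sketch (assumed inputs, not part of the class) of how the two
# helpers above combine during export: a name containing '/' is first
# sanitised, and a clash with an existing name then gets a numeric suffix.
#   _replace_special_characters('My/Plot')        -> 'My-Plot'
#   _make_unique_name('My-Plot', {1: 'My-Plot'})  -> 'My-Plot (1)'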
def _export_plot(self, plot_number, plot_name, dir_name, extension):
"""
Given a plot number, plot name, directory and extension
construct the absolute path name and call the model to save
the figure
:param plot_number: The unique number in GlobalFigureManager
:param plot_name: The name to use for saving
:param dir_name: The directory to save to
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
if dir_name:
filename = os.path.join(dir_name, plot_name + extension)
try:
self.model.export_plot(plot_number, filename)
except ValueError as e:
print(e)
| gpl-3.0 |
ishanic/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
apache/incubator-airflow | airflow/providers/amazon/aws/transfers/hive_to_dynamodb.py | 7 | 4144 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains operator to move data from Hive to DynamoDB."""
import json
from typing import Callable, Optional
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.dynamodb import AwsDynamoDBHook
from airflow.providers.apache.hive.hooks.hive import HiveServer2Hook
from airflow.utils.decorators import apply_defaults
class HiveToDynamoDBOperator(BaseOperator):
"""
Moves data from Hive to DynamoDB. Note that for now the data is loaded
into memory before being pushed to DynamoDB, so this operator should
be used for smallish amounts of data.
:param sql: SQL query to execute against the hive database. (templated)
:type sql: str
:param table_name: target DynamoDB table
:type table_name: str
:param table_keys: partition key and sort key
:type table_keys: list
:param pre_process: implement pre-processing of source data
:type pre_process: function
:param pre_process_args: list of pre_process function arguments
:type pre_process_args: list
:param pre_process_kwargs: dict of pre_process function arguments
:type pre_process_kwargs: dict
:param region_name: aws region name (example: us-east-1)
:type region_name: str
:param schema: hive database schema
:type schema: str
:param hiveserver2_conn_id: source hive connection
:type hiveserver2_conn_id: str
:param aws_conn_id: aws connection
:type aws_conn_id: str
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__( # pylint: disable=too-many-arguments
self,
*,
sql: str,
table_name: str,
table_keys: list,
pre_process: Optional[Callable] = None,
pre_process_args: Optional[list] = None,
pre_process_kwargs: Optional[dict] = None,
region_name: Optional[str] = None,
schema: str = 'default',
hiveserver2_conn_id: str = 'hiveserver2_default',
aws_conn_id: str = 'aws_default',
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.table_name = table_name
self.table_keys = table_keys
self.pre_process = pre_process
self.pre_process_args = pre_process_args
self.pre_process_kwargs = pre_process_kwargs
self.region_name = region_name
self.schema = schema
self.hiveserver2_conn_id = hiveserver2_conn_id
self.aws_conn_id = aws_conn_id
def execute(self, context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
self.log.info('Extracting data from Hive')
self.log.info(self.sql)
data = hive.get_pandas_df(self.sql, schema=self.schema)
dynamodb = AwsDynamoDBHook(
aws_conn_id=self.aws_conn_id,
table_name=self.table_name,
table_keys=self.table_keys,
region_name=self.region_name,
)
self.log.info('Inserting rows into dynamodb')
if self.pre_process is None:
dynamodb.write_batch_data(json.loads(data.to_json(orient='records')))
else:
dynamodb.write_batch_data(
self.pre_process(data=data, args=self.pre_process_args, kwargs=self.pre_process_kwargs)
)
self.log.info('Done.')
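# Minimal usage sketch (connection ids, query and table below are assumed
# examples, not part of this module): inside a DAG one might instantiate the
# operator roughly as
#
#   backfill = HiveToDynamoDBOperator(
#       task_id='hive_to_dynamodb',
#       sql='SELECT user_id, score FROM analytics.daily_scores',
#       table_name='daily_scores',
#       table_keys=['user_id'],
#       region_name='us-east-1',
#   )
#
# so that each row returned by the Hive query becomes one DynamoDB item
# keyed on ``user_id``.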
| apache-2.0 |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/scipy/stats/_discrete_distns.py | 6 | 21463 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from scipy.misc import logsumexp
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
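# Quick illustrative check (assumed values, not part of the module): the pmf
# implemented above matches the closed form choose(n, k) * p**k * (1-p)**(n-k).
#   >>> from scipy.stats import binom
#   >>> from scipy.special import comb
#   >>> n, p, k = 10, 0.3, 4
#   >>> abs(binom.pmf(k, n, p) - comb(n, k) * p**k * (1 - p)**(n - k)) < 1e-12
#   True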
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
def _logsf(self, k, M, n, N):
"""
More precise calculation than log(sf)
"""
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
# Override rv_discrete._argcheck to allow mu=0.
def _argcheck(self, mu):
return mu >= 0
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
mu_nonzero = tmp > 0
g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
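# Illustrative sketch (assumed rate, not part of the module): the Poisson mean
# and variance both equal ``mu``, which _stats above encodes directly.
#   >>> from scipy.stats import poisson
#   >>> mean, var = poisson.stats(3.5, moments='mv')
#   >>> float(mean), float(var)
#   (3.5, 3.5)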
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
boltzmann.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
Note the difference to the numpy ``random_integers`` which
returns integers on a *closed* interval ``[low, high]``.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return self._random_state.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
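# Illustrative sketch (assumed bounds, not part of the module): ``high`` is
# excluded, unlike numpy's old ``random_integers`` which used a closed
# interval, so with low=2, high=6 the support is {2, 3, 4, 5}, each value has
# pmf 1/(6 - 2) = 0.25, and randint.pmf(6, 2, 6) is 0.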
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
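# Illustrative sketch (assumed rates, not part of the module): for independent
# Poisson counts with rates 5 and 3 the Skellam difference has mean 5 - 3 = 2
# and variance 5 + 3 = 8, matching _stats above.
#   >>> from scipy.stats import skellam
#   >>> mean, var = skellam.stats(5, 3, moments='mv')
#   >>> float(mean), float(var)
#   (2.0, 8.0)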
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| mit |
xuewei4d/scikit-learn | examples/gaussian_process/plot_gpc_xor.py | 43 | 2170 | """
========================================================================
Illustration of Gaussian process classification (GPC) on the XOR dataset
========================================================================
This example illustrates GPC on XOR data. Compared are a stationary, isotropic
kernel (RBF) and a non-stationary kernel (DotProduct). On this particular
dataset, the DotProduct kernel obtains considerably better results because the
class-boundaries are linear and coincide with the coordinate axes. In general,
stationary kernels often obtain better results.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct
xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]
for i, kernel in enumerate(kernels):
clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
Z = Z.reshape(xx.shape)
plt.subplot(1, 2, i + 1)
image = plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0.5], linewidths=2,
colors=['k'])
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired,
edgecolors=(0, 0, 0))
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.colorbar(image)
plt.title("%s\n Log-Marginal-Likelihood:%.3f"
% (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
PrashntS/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
lucidfrontier45/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 11 | 2828 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
# set zero entries (missing edges) to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
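# Illustrative sketch (assumed 3-node graph, not part of the test suite): the
# brute-force reference above shortens the 0 -> 2 distance from 4 to 3 by
# routing through node 1.
#   >>> g = np.array([[0., 1., 4.], [1., 0., 2.], [4., 2., 0.]])
#   >>> floyd_warshall_slow(g.copy())[0, 2]
#   3.0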
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix, i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
effigies/mne-python | examples/time_frequency/plot_source_label_time_frequency.py | 2 | 3767 | """
=========================================================
Compute power and phase lock in label of the source space
=========================================================
Compute time-frequency maps of power and phase lock in the source space.
The inverse method is linear based on dSPM inverse operator.
The example also shows the difference in the time-frequency maps
when they are computed with and without subtracting the evoked response
from each epoch. The former results in induced activity only while the
latter also includes evoked (stimulus-locked) activity.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
print(__doc__)
import numpy as np
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject,
preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
frequencies = np.arange(7, 30, 2) # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = frequencies / 3. # different number of cycle per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
import matplotlib.pyplot as plt
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
['evoked + induced',
'induced only'])):
# compute the source space power and phase lock
power, phase_lock = source_induced_power(this_epochs, inverse_operator,
frequencies, label, baseline=(-0.1, 0), baseline_mode='percent',
n_cycles=n_cycles, n_jobs=1)
power = np.mean(power, axis=0) # average over sources
phase_lock = np.mean(phase_lock, axis=0) # average over sources
times = epochs.times
##########################################################################
# View time-frequency plots
plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 2, 2 * ii + 1)
plt.imshow(20 * power,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Power (%s)' % title)
plt.colorbar()
plt.subplot(2, 2, 2 * ii + 2)
plt.imshow(phase_lock,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0, vmax=0.7,
cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Phase-lock (%s)' % title)
plt.colorbar()
plt.show()
| bsd-3-clause |
nielsbuwen/ilastik | ilastik/applets/nanshe/opColorizeLabelImage.py | 3 | 6472 | ###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
__author__ = "John Kirkham <[email protected]>"
__date__ = "$Oct 24, 2014 08:05:35 EDT$"
from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.operators.opBlockedArrayCache import OpBlockedArrayCache
from ilastik.applets.base.applet import DatasetConstraintError
import itertools
import numpy
import matplotlib
import matplotlib.colors
import matplotlib.cm
import vigra
import nanshe
import nanshe.util.iters
import nanshe.imp.segment
import nanshe.util.xnumpy
class OpColorizeLabelImage(Operator):
"""
Lazy computation of colors
"""
name = "OpColorizeLabelImage"
category = "Pointwise"
Input = InputSlot()
NumColors = InputSlot(value=256, stype='int')
Output = OutputSlot()
@staticmethod
def colorTableList(num_colors):
colors = []
# Transparent for the zero label
colors.append((0,0,0,0))
rgb_color_values = list(nanshe.util.iters.splitting_xrange(num_colors))
converter = matplotlib.colors.ColorConverter()
for _ in rgb_color_values:
a_rgb_color = tuple()
for __ in converter.to_rgba(matplotlib.cm.gist_rainbow(_)):
a_rgb_color += ( int(round(255*__)), )
colors.append(a_rgb_color)
colors = numpy.asarray(colors, dtype=numpy.uint8)
return(colors)
def __init__(self, *args, **kwargs):
super( OpColorizeLabelImage, self ).__init__( *args, **kwargs )
self.colors = numpy.zeros((0,4), dtype=numpy.uint8)
self.Input.notifyReady( self._checkConstraints )
def _checkConstraints(self, *args):
slot = self.Input
sh = slot.meta.shape
ndim = len(sh)
ax = slot.meta.axistags
tsh = slot.meta.getTaggedShape()
if ("c" in tsh):
if (tsh["c"] != 1):
raise DatasetConstraintError(
"ColorizeLabelImage",
"Input image cannot have a non-singleton channel dimension.")
if (ndim == 1):
raise DatasetConstraintError(
"ColorizeLabelImage",
"There must be more dimensions than just the channel dimension.")
if not ax[-1].isChannel():
raise DatasetConstraintError(
"ColorizeLabelImage",
"Input image must have channel last." )
def setupOutputs(self):
# Copy the input metadata to both outputs
self.Output.meta.assignFrom( self.Input.meta )
self.Output.meta.shape = self.Output.meta.shape[:-1] + (4,)
self.Output.meta.dtype = numpy.uint8
dims = [_ for _ in self.Output.meta.axistags if not _.isChannel()]
self.Output.meta.axistags = vigra.AxisTags(*(dims + [vigra.AxisInfo.c]))
def execute(self, slot, subindex, roi, result):
key = roi.toSlice()
input_key = list(key)
input_key = input_key[:-1] + [slice(None)]
input_key = tuple(input_key)
raw = self.Input[input_key].wait()
if not self.colors.size:
self.colors = OpColorizeLabelImage.colorTableList(self.NumColors.value)
processed = numpy.empty(nanshe.util.iters.len_slices(key), dtype=numpy.uint8)
for each_label in numpy.unique(raw):
mask = (raw == each_label)
mask = mask[..., 0]
processed[mask, :] = self.colors[each_label, key[-1]]
if slot.name == 'Output':
result[...] = processed
def propagateDirty(self, slot, subindex, roi):
if (slot.name == "Input"):
key = roi.toSlice()
key = list(key)
key = key[:-1] + [slice(None)]
key = tuple(key)
self.Output.setDirty(key)
else:
assert False, "Unknown dirty input slot"
class OpColorizeLabelImageCached(Operator):
"""
Given an input image and max/min bounds,
masks out (i.e. sets to zero) all pixels that fall outside the bounds.
"""
name = "OpColorizeLabelImageCached"
category = "Pointwise"
Input = InputSlot()
NumColors = InputSlot(value=256, stype='int')
Output = OutputSlot()
def __init__(self, *args, **kwargs):
super( OpColorizeLabelImageCached, self ).__init__( *args, **kwargs )
self.opColorizeLabelImage = OpColorizeLabelImage(parent=self)
self.opColorizeLabelImage.NumColors.connect(self.NumColors)
self.opCache = OpBlockedArrayCache(parent=self)
self.opCache.fixAtCurrent.setValue(False)
self.opColorizeLabelImage.Input.connect( self.Input )
self.opCache.Input.connect( self.opColorizeLabelImage.Output )
self.Output.connect( self.opCache.Output )
def setupOutputs(self):
axes_shape_iter = itertools.izip(self.opColorizeLabelImage.Output.meta.axistags,
self.opColorizeLabelImage.Output.meta.shape)
block_shape = []
for each_axistag, each_len in axes_shape_iter:
if each_axistag.isSpatial():
each_len = min(each_len, 256)
elif each_axistag.isTemporal():
each_len = min(each_len, 50)
block_shape.append(each_len)
block_shape = tuple(block_shape)
self.opCache.innerBlockShape.setValue(block_shape)
self.opCache.outerBlockShape.setValue(block_shape)
def setInSlot(self, slot, subindex, roi, value):
pass
def propagateDirty(self, slot, subindex, roi):
pass
| gpl-3.0 |
liangz0707/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each possible
value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with a fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/indexes/common.py | 4 | 35247 | # -*- coding: utf-8 -*-
import pytest
from pandas import compat
from pandas.compat import PY3
import numpy as np
from pandas import (Series, Index, Float64Index, Int64Index, UInt64Index,
RangeIndex, MultiIndex, CategoricalIndex, DatetimeIndex,
TimedeltaIndex, PeriodIndex, IntervalIndex,
notnull, isnull)
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.dtypes.common import needs_i8_conversion
from pandas._libs.tslib import iNaT
import pandas.util.testing as tm
import pandas as pd
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def setup_indices(self):
for name, idx in self.indices.items():
setattr(self, name, idx)
def verify_pickle(self, index):
unpickled = tm.round_trip_pickle(index)
assert index.equals(unpickled)
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
pytest.raises(TypeError, self._holder)
def test_to_series(self):
# assert that we are creating a copy of the index
idx = self.create_index()
s = idx.to_series()
assert s.values is not idx.values
assert s.index is not idx
assert s.name == idx.name
def test_shift(self):
# GH8083 test the base class for shift
idx = self.create_index()
pytest.raises(NotImplementedError, idx.shift, 1)
pytest.raises(NotImplementedError, idx.shift, 1, 2)
def test_create_index_existing_name(self):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = self.create_index()
if not isinstance(expected, MultiIndex):
expected.name = 'foo'
result = pd.Index(expected)
tm.assert_index_equal(result, expected)
result = pd.Index(expected, name='bar')
expected.name = 'bar'
tm.assert_index_equal(result, expected)
else:
expected.names = ['foo', 'bar']
result = pd.Index(expected)
tm.assert_index_equal(
result, Index(Index([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'),
names=['foo', 'bar']))
result = pd.Index(expected, names=['A', 'B'])
tm.assert_index_equal(
result,
Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')],
dtype='object'), names=['A', 'B']))
def test_numeric_compat(self):
idx = self.create_index()
tm.assert_raises_regex(TypeError, "cannot perform __mul__",
lambda: idx * 1)
tm.assert_raises_regex(TypeError, "cannot perform __mul__",
lambda: 1 * idx)
div_err = "cannot perform __truediv__" if PY3 \
else "cannot perform __div__"
tm.assert_raises_regex(TypeError, div_err, lambda: idx / 1)
tm.assert_raises_regex(TypeError, div_err, lambda: 1 / idx)
tm.assert_raises_regex(TypeError, "cannot perform __floordiv__",
lambda: idx // 1)
tm.assert_raises_regex(TypeError, "cannot perform __floordiv__",
lambda: 1 // idx)
def test_logical_compat(self):
idx = self.create_index()
tm.assert_raises_regex(TypeError, 'cannot perform all',
lambda: idx.all())
tm.assert_raises_regex(TypeError, 'cannot perform any',
lambda: idx.any())
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_reindex_base(self):
idx = self.create_index()
expected = np.arange(idx.size, dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with tm.assert_raises_regex(ValueError, 'Invalid fill method'):
idx.get_indexer(idx, method='invalid')
def test_ndarray_compat_properties(self):
idx = self.create_index()
assert idx.T.equals(idx)
assert idx.transpose().equals(idx)
values = idx.values
for prop in self._compat_props:
assert getattr(idx, prop) == getattr(values, prop)
# test for validity
idx.nbytes
idx.values.nbytes
def test_repr_roundtrip(self):
idx = self.create_index()
tm.assert_index_equal(eval(repr(idx)), idx)
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
assert "'foo'" in str(idx)
assert idx.__class__.__name__ in str(idx)
def test_dtype_str(self):
for idx in self.indices.values():
dtype = idx.dtype_str
assert isinstance(dtype, compat.string_types)
assert dtype == str(idx.dtype)
def test_repr_max_seq_item_setting(self):
# GH10182
idx = self.create_index()
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert '...' not in str(idx)
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
tm.assert_raises_regex(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
for ind in self.indices.values():
# don't tests a MultiIndex here (as its tested separated)
if isinstance(ind, MultiIndex):
continue
original_name = ind.name
new_ind = ind.set_names([new_name])
assert new_ind.name == new_name
assert ind.name == original_name
res = ind.rename(new_name, inplace=True)
# should return None
assert res is None
assert ind.name == new_name
assert ind.names == [new_name]
# with tm.assert_raises_regex(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with tm.assert_raises_regex(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind.rename(name, inplace=True)
assert ind.name == name
assert ind.names == [name]
def test_hash_error(self):
for ind in self.indices.values():
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(ind).__name__):
hash(ind)
def test_copy_name(self):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
for name, index in compat.iteritems(self.indices):
if isinstance(index, MultiIndex):
continue
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_ensure_copied_data(self):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
for name, index in compat.iteritems(self.indices):
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs['freq'] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
continue
index_type = index.__class__
result = index_type(index.values, copy=True, **init_kwargs)
tm.assert_index_equal(index, result)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='copy')
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False,
**init_kwargs)
tm.assert_numpy_array_equal(index._values, result._values,
check_same='same')
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='same')
tm.assert_numpy_array_equal(index._values, result._values,
check_same='same')
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for ind in self.indices.values():
            # don't test a MultiIndex here (as it's tested separately)
if isinstance(ind, MultiIndex):
continue
for func in (copy, deepcopy):
idx_copy = func(ind)
assert idx_copy is not ind
assert idx_copy.equals(ind)
new_copy = ind.copy(deep=True, name="banana")
assert new_copy.name == "banana"
def test_duplicates(self):
for ind in self.indices.values():
if not len(ind):
continue
if isinstance(ind, MultiIndex):
continue
idx = self._holder([ind[0]] * 5)
assert not idx.is_unique
assert idx.has_duplicates
# GH 10115
# preserve names
idx.name = 'foo'
result = idx.drop_duplicates()
assert result.name == 'foo'
tm.assert_index_equal(result, Index([ind[0]], name='foo'))
def test_get_unique_index(self):
for ind in self.indices.values():
# MultiIndex tested separately
if not len(ind) or isinstance(ind, MultiIndex):
continue
idx = ind[[0] * 5]
idx_unique = ind[[0]]
# We test against `idx_unique`, so first we make sure it's unique
# and doesn't contain nans.
assert idx_unique.is_unique
try:
assert not idx_unique.hasnans
except NotImplementedError:
pass
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, idx_unique)
# nans:
if not ind._can_hold_na:
continue
if needs_i8_conversion(ind):
vals = ind.asi8[[0] * 5]
vals[0] = iNaT
else:
vals = ind.values[[0] * 5]
vals[0] = np.nan
vals_unique = vals[:2]
idx_nan = ind._shallow_copy(vals)
idx_unique_nan = ind._shallow_copy(vals_unique)
assert idx_unique_nan.is_unique
assert idx_nan.dtype == ind.dtype
assert idx_unique_nan.dtype == ind.dtype
for dropna, expected in zip([False, True],
[idx_unique_nan, idx_unique]):
for i in [idx_nan, idx_unique_nan]:
result = i._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, expected)
def test_sort(self):
for ind in self.indices.values():
pytest.raises(TypeError, ind.sort)
def test_mutability(self):
for ind in self.indices.values():
if not len(ind):
continue
pytest.raises(TypeError, ind.__setitem__, 0, ind[0])
def test_view(self):
for ind in self.indices.values():
i_view = ind.view()
assert i_view.name == ind.name
def test_compat(self):
for ind in self.indices.values():
assert ind.tolist() == list(ind)
def test_memory_usage(self):
for name, index in compat.iteritems(self.indices):
result = index.memory_usage()
if len(index):
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
assert result2 > result
if index.inferred_type == 'object':
assert result3 > result2
else:
# we report 0 for no-length
assert result == 0
def test_argsort(self):
for k, ind in self.indices.items():
# separately tested
if k in ['catIndex']:
continue
result = ind.argsort()
expected = np.array(ind).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
for k, ind in self.indices.items():
result = np.argsort(ind)
expected = ind.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
            if isinstance(ind, (CategoricalIndex, RangeIndex)):
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg,
np.argsort, ind, axis=1)
msg = "the 'kind' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.argsort,
ind, kind='mergesort')
msg = "the 'order' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.argsort,
ind, order=('a', 'b'))
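        # Illustrative note (added): ``np.argsort`` delegates to the object's
        # own ``argsort`` method when one is defined, so the keyword arguments
        # above reach ``Index.argsort``, where the pandas/numpy compatibility
        # shims of those two index types can reject unsupported parameters.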
def test_pickle(self):
for ind in self.indices.values():
self.verify_pickle(ind)
ind.name = 'foo'
self.verify_pickle(ind)
def test_take(self):
indexer = [4, 3, 0, 2]
for k, ind in self.indices.items():
# separate
if k in ['boolIndex', 'tuples', 'empty']:
continue
result = ind.take(indexer)
expected = ind[indexer]
assert result.equals(expected)
if not isinstance(ind,
(DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# GH 10791
with pytest.raises(AttributeError):
ind.freq
def test_take_invalid_kwargs(self):
idx = self.create_index()
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_repeat(self):
rep = 2
i = self.create_index()
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
i = self.create_index()
rep = np.arange(len(i))
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
def test_numpy_repeat(self):
rep = 2
i = self.create_index()
expected = i.repeat(rep)
tm.assert_index_equal(np.repeat(i, rep), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.repeat,
i, rep, axis=0)
def test_where(self):
i = self.create_index()
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
_nan = i._na_value
cond = [False] + [True] * len(i[1:])
expected = pd.Index([_nan] + i[1:].tolist(), dtype=i.dtype)
result = i.where(cond)
tm.assert_index_equal(result, expected)
def test_where_array_like(self):
i = self.create_index()
_nan = i._na_value
cond = [False] + [True] * (len(i) - 1)
klasses = [list, tuple, np.array, pd.Series]
expected = pd.Index([_nan] + i[1:].tolist(), dtype=i.dtype)
for klass in klasses:
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_setops_errorcases(self):
for name, idx in compat.iteritems(self.indices):
# # non-iterable input
cases = [0.5, 'xxx']
methods = [idx.intersection, idx.union, idx.difference,
idx.symmetric_difference]
for method in methods:
for case in cases:
tm.assert_raises_regex(TypeError,
"Input must be Index "
"or array-like",
method, case)
def test_intersection_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[:5]
second = idx[:3]
intersect = first.intersection(second)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with tm.assert_raises_regex(ValueError, msg):
result = first.intersection(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.intersection(case)
assert tm.equalContents(result, second)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.intersection([1, 2, 3])
def test_union_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[3:]
second = idx[:5]
everything = idx
union = first.union(second)
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with tm.assert_raises_regex(ValueError, msg):
result = first.union(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.union(case)
assert tm.equalContents(result, everything)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.union([1, 2, 3])
def test_difference_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[2:]
second = idx[:4]
answer = idx[4:]
result = first.difference(second)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with tm.assert_raises_regex(ValueError, msg):
result = first.difference(case)
elif isinstance(idx, CategoricalIndex):
pass
elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
assert result.__class__ == answer.__class__
tm.assert_numpy_array_equal(result.asi8, answer.asi8)
else:
result = first.difference(case)
assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.difference([1, 2, 3])
def test_symmetric_difference(self):
for name, idx in compat.iteritems(self.indices):
first = idx[1:]
second = idx[:-1]
if isinstance(idx, CategoricalIndex):
pass
else:
answer = idx[[0, -1]]
result = first.symmetric_difference(second)
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with tm.assert_raises_regex(ValueError, msg):
result = first.symmetric_difference(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.symmetric_difference(case)
assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.symmetric_difference([1, 2, 3])
# 12591 deprecated
with tm.assert_produces_warning(FutureWarning):
first.sym_diff(second)
def test_insert_base(self):
for name, idx in compat.iteritems(self.indices):
result = idx[1:4]
if not len(idx):
continue
# test 0th element
assert idx[0:4].equals(result.insert(0, idx[0]))
def test_delete_base(self):
for name, idx in compat.iteritems(self.indices):
if not len(idx):
continue
if isinstance(idx, RangeIndex):
# tested in class
continue
expected = idx[1:]
result = idx.delete(0)
assert result.equals(expected)
assert result.name == expected.name
expected = idx[:-1]
result = idx.delete(-1)
assert result.equals(expected)
assert result.name == expected.name
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
result = idx.delete(len(idx))
def test_equals(self):
for name, idx in compat.iteritems(self.indices):
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(np.array(idx))
# Cannot pass in non-int64 dtype to RangeIndex
if not isinstance(idx, RangeIndex):
same_values = Index(idx, dtype=object)
assert idx.equals(same_values)
assert same_values.equals(idx)
if idx.nlevels == 1:
# do not test MultiIndex
assert not idx.equals(pd.Series(idx))
def test_equals_op(self):
# GH9947, GH10637
index_a = self.create_index()
if isinstance(index_a, PeriodIndex):
return
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == index_d
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == series_d
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with tm.assert_raises_regex(ValueError, msg):
series_a == series_d
with tm.assert_raises_regex(ValueError, "Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_series_equal(series_a == item, Series(expected3))
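        # Illustrative example (added) of the scalar broadcast exercised above:
        #   Index(['a', 'b', 'c']) == 'b'  ->  array([False,  True, False])
        # whereas a MultiIndex element such as ('a', 1) has length 2 itself and
        # would be treated as an array-like rather than a scalar.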
def test_numpy_ufuncs(self):
# test ufuncs of numpy 1.9.2. see:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html
# some functions are skipped because it may return different result
# for unicode input depending on numpy version
for name, idx in compat.iteritems(self.indices):
for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10,
np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin,
np.arccos, np.arctan, np.sinh, np.cosh, np.tanh,
np.arcsinh, np.arccosh, np.arctanh, np.deg2rad,
np.rad2deg]:
if isinstance(idx, DatetimeIndexOpsMixin):
# raise TypeError or ValueError (PeriodIndex)
# PeriodIndex behavior should be changed in future version
with pytest.raises(Exception):
with np.errstate(all='ignore'):
func(idx)
elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)):
# coerces to float (e.g. np.sin)
with np.errstate(all='ignore'):
result = func(idx)
exp = Index(func(idx.values), name=idx.name)
tm.assert_index_equal(result, exp)
assert isinstance(result, pd.Float64Index)
else:
# raise AttributeError or TypeError
if len(idx) == 0:
continue
else:
with pytest.raises(Exception):
with np.errstate(all='ignore'):
func(idx)
for func in [np.isfinite, np.isinf, np.isnan, np.signbit]:
if isinstance(idx, DatetimeIndexOpsMixin):
# raise TypeError or ValueError (PeriodIndex)
with pytest.raises(Exception):
func(idx)
elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)):
# Results in bool array
result = func(idx)
assert isinstance(result, np.ndarray)
assert not isinstance(result, Index)
else:
if len(idx) == 0:
continue
else:
with pytest.raises(Exception):
func(idx)
def test_hasnans_isnans(self):
# GH 11343, added tests for hasnans / isnans
for name, index in self.indices.items():
if isinstance(index, MultiIndex):
pass
else:
idx = index.copy()
                # the cases in self.indices don't include NaN
expected = np.array([False] * len(idx), dtype=bool)
tm.assert_numpy_array_equal(idx._isnan, expected)
assert not idx.hasnans
idx = index.copy()
values = idx.values
if len(index) == 0:
continue
elif isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
continue
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = index.__class__(values, freq=index.freq)
else:
idx = index.__class__(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans
def test_fillna(self):
# GH 11343
for name, index in self.indices.items():
if len(index) == 0:
pass
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isnull is not defined for MultiIndex"
with tm.assert_raises_regex(NotImplementedError, msg):
idx.fillna(idx[0])
else:
idx = index.copy()
result = idx.fillna(idx[0])
tm.assert_index_equal(result, idx)
assert result is not idx
msg = "'value' must be a scalar, passed: "
with tm.assert_raises_regex(TypeError, msg):
idx.fillna([idx[0]])
idx = index.copy()
values = idx.values
if isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
continue
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = index.__class__(values, freq=index.freq)
else:
idx = index.__class__(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans
def test_nulls(self):
# this is really a smoke test for the methods
        # as these are adequately tested elsewhere
for name, index in self.indices.items():
if len(index) == 0:
tm.assert_numpy_array_equal(
index.isnull(), np.array([], dtype=bool))
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isnull is not defined for MultiIndex"
with tm.assert_raises_regex(NotImplementedError, msg):
idx.isnull()
else:
if not index.hasnans:
tm.assert_numpy_array_equal(
index.isnull(), np.zeros(len(index), dtype=bool))
tm.assert_numpy_array_equal(
index.notnull(), np.ones(len(index), dtype=bool))
else:
result = isnull(index)
tm.assert_numpy_array_equal(index.isnull(), result)
tm.assert_numpy_array_equal(index.notnull(), ~result)
def test_empty(self):
# GH 15270
index = self.create_index()
assert not index.empty
assert index[:0].empty
| mit |
fejoa/IVANWorldmapResearch | WorldBuild_Fast_3d.py | 1 | 7639 | # -*- coding: utf-8 -*-
"""
IVAN Worldmap Research
Copyright (C) Ryan van Herel
Released under the GNU General
Public License
See LICENSING which should be included
along with this file for more details
@author: fejoa
"""
import os
from random import randint
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import axes3d
from matplotlib.colors import LinearSegmentedColormap
cdict = {'red': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, 0.0, 1.0),
(1.0, 0.1, 1.0)),
'blue': ((0.0, 0.0, 0.1),
(0.5, 1.0, 0.0),
(1.0, 0.0, 0.0)),
'alpha': ((0.0, 1.0, 1.0),
# (0.25,1.0, 1.0),
(0.5, 0.3, 0.3),
# (0.75,1.0, 1.0),
(1.0, 1.0, 1.0))
}
valpuri = LinearSegmentedColormap('valpurus', cdict)
plt.register_cmap(cmap=valpuri)
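# Note on the cdict format above (added for clarity): each channel lists anchor
# tuples (x, value_below_x, value_above_x) with x running from 0.0 to 1.0 over
# the colormap.  The alpha channel here dips to 0.3 at x = 0.5, so values near
# the middle of the colour range (roughly sea level in the plots below) are
# drawn semi-transparent.  A minimal two-anchor grey ramp for comparison:
#
#   grey_ramp = LinearSegmentedColormap('grey_ramp', {
#       'red':   ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
#       'green': ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
#       'blue':  ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0))})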
class worldmap:
def __init__(self, length, width, smooth, steps, GENERATE_CONTINENTS):
self.__length = length
self.__width = width
self.__area = length * width
self.__AltitudeBuffer = np.zeros((width, length))
self.__OldAltitudeBuffer = np.zeros((width, length))
self.__DisplayMap = np.zeros((width, length))
self.__gen_initial_map(smooth, steps, GENERATE_CONTINENTS)
def __gen_initial_map(self, smooth, steps, GENERATE_CONTINENTS):
#create initial random map
HYBRID = 2
if GENERATE_CONTINENTS == HYBRID or GENERATE_CONTINENTS == 1:
for x in range(self.__width):
for y in range(self.__length):
self.__AltitudeBuffer[x][y] = (4000 - randint(0, 8000))
if GENERATE_CONTINENTS == HYBRID or GENERATE_CONTINENTS == 0:
#create "splodges"
for x in range(self.__width/2):
for y in range(self.__length/2):
self.__AltitudeBuffer[x][y] += (randint(0, x*y)) - 800
for x in range(self.__width/2, self.__width):
for y in range(self.__length/2, self.__length):
self.__AltitudeBuffer[x][y] += (randint(0, (self.__width-x)*(self.__length-y))) - 800
for x in range(self.__width/2):
for y in range(self.__length/2, self.__length):
self.__AltitudeBuffer[x][y] += (randint(0, (x)*(self.__length-y))) - 800
for x in range(self.__width/2, self.__width):
for y in range(self.__length/2):
self.__AltitudeBuffer[x][y] += (randint(0, (self.__width-x)*(y))) - 800
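            # Explanatory note (added): each quadrant gets random bumps whose
            # maximum size is the product of the distances to the two nearest
            # map edges, minus a constant 800, so the bumps grow towards the
            # centre of the map and land tends to pile up there while the rim
            # is biased towards sea.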
if smooth == 1:
self.__smooth_altitude(steps)
print "DONE"
def __quantize_grid(self):
LAND = 1
SEA = 0
for x in range(self.__width):
for y in range(self.__length):
if self.__AltitudeBuffer[x][y] > 0.0:
self.__DisplayMap[x][y] = LAND
else:
self.__DisplayMap[x][y] = SEA
def __smooth_altitude(self, steps):
for c in range(steps):
#self.__plot_landsea(c, steps)
for y in range(self.__length):
self.__safe_smooth(0, y)
for x in range(1, self.__width - 1):
self.__safe_smooth(x, 0)
for y in range(1, self.__length - 1):
self.__fast_smooth(x, y)
self.__safe_smooth(x, self.__length - 1)
for y in range(self.__length):
self.__safe_smooth(self.__width - 1, y)
if(c > 8):
self.__plot_landsea(c, steps)
def __safe_smooth(self, x, y):
HeightNear = 0
SquaresNear = 0
DirX = [ -1, -1, -1, 0, 0, 1, 1, 1 ]
DirY = [ -1, 0, 1, -1, 1, -1, 0, 1 ]
for d in range(0, 4):
X = x + DirX[d]
Y = y + DirY[d]
if self.__is_valid_position(X, Y):
HeightNear += self.__OldAltitudeBuffer[X][Y]
SquaresNear += 1
for d in range(4, 7):
X = x + DirX[d]
Y = y + DirY[d]
if self.__is_valid_position(X, Y):
HeightNear += self.__AltitudeBuffer[X][Y]
SquaresNear += 1
self.__OldAltitudeBuffer[x][y] = self.__AltitudeBuffer[x][y]
self.__AltitudeBuffer[x][y] = HeightNear / SquaresNear
def __fast_smooth(self, x, y):
HeightNear = 0
DirX = [ -1, -1, -1, 0, 0, 1, 1, 1 ]
DirY = [ -1, 0, 1, -1, 1, -1, 0, 1 ]
for d in range(0, 4):
HeightNear += self.__OldAltitudeBuffer[x + DirX[d]][y + DirY[d]]
for d in range(4, 7):
HeightNear += self.__AltitudeBuffer[x + DirX[d]][y + DirY[d]]
        self.__OldAltitudeBuffer[x][y] = self.__AltitudeBuffer[x][y]
        self.__AltitudeBuffer[x][y] = HeightNear / 8
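    # Note on the two smoothing helpers above (added for clarity): the grid is
    # swept in index order, and a cell's pre-update value is stashed in
    # __OldAltitudeBuffer just before it is overwritten.  Neighbours already
    # visited in the current pass are therefore read from __OldAltitudeBuffer,
    # while not-yet-visited neighbours are read straight from __AltitudeBuffer,
    # so the average mixes in pre-pass values using only one backup buffer.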
def __is_valid_position(self, X, Y):
return ((X >= 0) and (Y >= 0) and (X < self.__width) and (Y < self.__length))
def __plot_landsea(self, step, maxsteps):
mini = np.min(self.__AltitudeBuffer)
maxi = np.max(self.__AltitudeBuffer)
difi = (maxi - mini) / 9
absmax = max(abs(mini), maxi)
print "max altitude is ", maxi
print "min altitude is ", mini
destination = os.path.dirname(os.path.abspath(__file__)) + str(r'\outputs\%d'% step) + str(r'.png')
#self.__quantize_grid()
# fig = plt.figure()
# plt.imshow(self.__DisplayMap, interpolation='bilinear', origin='lower', cmap=cm.winter)
# CS = plt.contour(self.__DisplayMap, [0, 1], cmap=cm.winter)
# CB = plt.colorbar(CS, shrink=0.8, extend='both')
# l,b,w,h = plt.gca().get_position().bounds
# ll,bb,ww,hh = CB.ax.get_position().bounds
# CB.ax.set_position([ll, b+0.1*h, ww, h*0.8])
# plt.savefig(destination, bbox_inches='tight')
elevations = [-2000, -200, -100, -50, 0, 1, 50, 100, 200, 2000]
cols = ('#0000e6', '#0000ff', '#1a1aff', '#3333ff', '#33cc33', '#2eb82e', '#29a329', '#248f24', '#1f7a1f', '#1a651a')
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.view_init(elev=20, azim=30)
x = y = np.linspace(0,127,128)
X, Y = np.meshgrid(x, y)
Z = self.__AltitudeBuffer
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.3, cmap=valpuri, linewidth=0.1)
#cset = ax.contourf(X, Y, Z, zdir='z', offset=mini, cmap=valpuri, levels=elevations) # , colors=cols)#np.arange(mini, maxi, difi))
cset = ax.contourf(X, Y, Z, zdir='z', offset=mini, colors=cols, levels=elevations)
#cset.cmap.set_under('#0000e6')
#cset.cmap.set_over('#1a651a')
plt.title(str(r'Valpuri step %d'% step))
ax.set_xlabel('X')
ax.set_xlim(0, 127)
ax.set_ylabel('Y')
ax.set_ylim(0, 127)
ax.set_zlabel('Z')
#ax.set_zlim(-4000, 4000)
ax.set_zlim(-absmax, absmax)
#ax.set_zlim(mini, maxi)
cbar = plt.colorbar(cset)
plt.savefig(destination, bbox_inches='tight')
#plt.show()
if (step >= 9):
plt.show()
# usage: worldmap(XSize, YSize, use smoothing? (1=yes, 0=no), number of smoothing steps, 0=single island 1=lots of continents 2=continent with islands)
world = worldmap(128, 128, 1, 10, 1)
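# A couple of alternative invocations, following the parameter description
# above (illustrative only, not in the original script):
#
#   world = worldmap(128, 128, 1, 10, 0)   # single central island
#   world = worldmap(128, 128, 1, 10, 2)   # hybrid: continent with islands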
| gpl-2.0 |
gjhiggins/electrum | plugins/__init__.py | 4 | 4981 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import electrum
from electrum.i18n import _
descriptions = [
{
'name': 'audio_modem',
'fullname': _('Audio MODEM'),
'description': _('Provides support for air-gapped transaction signing.'),
'requires': [('amodem', 'http://github.com/romanz/amodem/')],
'available_for': ['qt'],
},
{
'name': 'btchipwallet',
'fullname': _('Ledger Wallet'),
'description': _('Provides support for Ledger hardware wallet'),
'requires': [('btchip', 'github.com/ledgerhq/btchip-python')],
'requires_wallet_type': ['btchip'],
'registers_wallet_type': ('hardware', 'btchip', _("Ledger wallet")),
'available_for': ['qt', 'cmdline'],
},
{
'name': 'cosigner_pool',
'fullname': _('Cosigner Pool'),
'description': ' '.join([
_("This plugin facilitates the use of multi-signatures wallets."),
_("It sends and receives partially signed transactions from/to your cosigner wallet."),
_("Transactions are encrypted and stored on a remote server.")
]),
'requires_wallet_type': ['2of2', '2of3'],
'available_for': ['qt'],
},
{
'name': 'email_requests',
'fullname': 'Email',
'description': _("Send and receive payment request with an email account"),
'available_for': ['qt'],
},
{
'name': 'exchange_rate',
'fullname': _("Exchange rates"),
'description': _("Exchange rates and currency conversion tools."),
'available_for': ['qt'],
},
{
'name': 'greenaddress_instant',
'fullname': 'GreenAddress instant',
'description': _("Allows validating if your transactions have instant confirmations by GreenAddress"),
'available_for': ['qt'],
},
{
'name':'keepkey',
'fullname': 'KeepKey',
'description': _('Provides support for KeepKey hardware wallet'),
'requires': [('keepkeylib','github.com/keepkey/python-keepkey')],
'requires_wallet_type': ['keepkey'],
'registers_wallet_type': ('hardware', 'keepkey', _("KeepKey wallet")),
'available_for': ['qt', 'cmdline'],
},
{
'name': 'labels',
'fullname': _('LabelSync'),
'description': '\n'.join([
_("The new and improved LabelSync plugin. This can sync your labels across multiple Electrum installs by using a remote database to save your data. Labels, transactions ids and addresses are encrypted before they are sent to the remote server."),
_("The label sync's server software is open-source as well and can be found on github.com/maran/electrum-sync-server")
]),
'available_for': ['qt']
},
{
'name': 'plot',
'fullname': 'Plot History',
'description': _("Ability to plot transaction history in graphical mode."),
'requires': [('matplotlib', 'matplotlib')],
'available_for': ['qt'],
},
{
'name':'trezor',
'fullname': 'Trezor Wallet',
'description': _('Provides support for Trezor hardware wallet'),
'requires': [('trezorlib','github.com/trezor/python-trezor')],
'requires_wallet_type': ['trezor'],
'registers_wallet_type': ('hardware', 'trezor', _("Trezor wallet")),
'available_for': ['qt', 'cmdline'],
},
{
'name': 'trustedcoin',
'fullname': _('Two Factor Authentication'),
'description': ''.join([
_("This plugin adds two-factor authentication to your wallet."), '<br/>',
_("For more information, visit"),
" <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
]),
'requires_wallet_type': ['2fa'],
'registers_wallet_type': ('twofactor', '2fa', _("Wallet with two-factor authentication")),
'available_for': ['qt', 'cmdline'],
},
{
'name': 'virtualkeyboard',
'fullname': 'Virtual Keyboard',
'description': '%s\n%s' % (_("Add an optional virtual keyboard to the password dialog."), _("Warning: do not use this if it makes you pick a weaker password.")),
'available_for': ['qt'],
}
]
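# Illustrative sketch (added, not part of Electrum itself): the descriptor
# dicts above can be filtered by the GUI that loads them, e.g. to list the
# plugins usable from the Qt interface:
#
#   qt_plugins = [d['fullname'] for d in descriptions if 'qt' in d['available_for']]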
| gpl-3.0 |
tangyouze/tushare | tushare/datayes/macro.py | 17 | 191349 | # -*- coding:utf-8 -*-
"""
DataYes (通联数据) macro data interface
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Macro():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
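    # Usage sketch (added for illustration; it assumes a DataYes/tushare token
    # has already been stored so that up.get_token() above can find it, e.g.
    # via a set_token helper expected to live in tushare.util.upass):
    #
    #   m = Macro()            # builds a Client from the stored token
    #   df = m.ChinaDataGDP()  # should yield a pandas DataFrame via _ret_data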
def ChinaMacroData(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Contains China's main macro indicator data; history from 1928. Query the "China Macro Indicators" API for the specific indicators.
"""
code, result = self.client.getData(vs.CHINAMACRODATA%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
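    # Illustrative call shape for the data methods in this class (placeholder
    # arguments only; real indicator IDs/names come from the *Info interfaces,
    # and dates are assumed here to be 'YYYYMMDD' strings):
    #
    #   m = Macro()
    #   df = m.ChinaMacroData(indicID='', indicName='',
    #                         beginDate='20140101', endDate='20141231')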
def ChinaMacroInfo(self, indicID='', indicNameAbbr='', parentID='', field=''):
"""
        Contains China macro indicator metadata. Pass an indicator ID or name to query details such as frequency, unit and source.
"""
code, result = self.client.getData(vs.CHINAMACROINFO%(indicID, indicNameAbbr, parentID, field))
return _ret_data(code, result)
def GlobalMacroData(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Contains key macro indicators for more than 20 major countries and regions; history from 1900. Query the "Global Macro Indicators" API for the specific indicators.
"""
code, result = self.client.getData(vs.GLOBALMACRODATA%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def GlobalMacroInfo(self, indicID='', indicNameAbbr='', parentID='', field=''):
"""
        Contains global macro indicator metadata. Pass an indicator ID or name to query details such as frequency, unit and source.
"""
code, result = self.client.getData(vs.GLOBALMACROINFO%(indicID, indicNameAbbr, parentID, field))
return _ret_data(code, result)
def IndustrialData(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Contains main industry indicator data; history from 1947. Query the "Industry Economic Indicators" API for the specific indicators.
"""
code, result = self.client.getData(vs.INDUSTRIALDATA%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def IndustrialInfo(self, indicID='', indicNameAbbr='', parentID='', field=''):
"""
        Contains industry indicator metadata. Pass an indicator ID or name to query details such as frequency, unit and source.
"""
code, result = self.client.getData(vs.INDUSTRIALINFO%(indicID, indicNameAbbr, parentID, field))
return _ret_data(code, result)
def EcommerceData(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Contains e-commerce indicator data; history from August 2014. Query the "E-commerce Indicators" API for the specific indicators.
"""
code, result = self.client.getData(vs.ECOMMERCEDATA%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceInfo(self, indicID='', indicNameAbbr='', parentID='', field=''):
"""
        Contains e-commerce indicator metadata. Pass an indicator ID or name to query details such as frequency, unit and source.
"""
code, result = self.client.getData(vs.ECOMMERCEINFO%(indicID, indicNameAbbr, parentID, field))
return _ret_data(code, result)
def ChinaDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China GDP data; see the API documentation for the specific indicators. History from 1984, updated quarterly.
"""
code, result = self.client.getData(vs.CHINADATAGDP%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataECI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China macroeconomic climate index data; see the API documentation for the specific indicators. History from 1993, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATAECI%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataPMI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China PMI and HSBC China PMI data; see the API documentation for the specific indicators. History from 2005, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATAPMI%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataCCI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China consumer climate index data; see the API documentation for the specific indicators. History from 1993, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATACCI%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataEconomistsBoomIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China economists' boom index data; see the API documentation for the specific indicators. History from 2006, updated quarterly.
"""
code, result = self.client.getData(vs.CHINADATAECONOMISTSBOOMINDEX%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataIndustrialBusinessClimateIndex(self, indicID='', indicName='',
beginDate='', endDate='', field=''):
"""
        China industrial business climate index data; see the API documentation for the specific indicators. History from 1999, updated quarterly.
"""
code, result = self.client.getData(vs.CHINADATAINDUSTRIALBUSINESSCLIMATEINDEX%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataCPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China consumer price index (CPI) data, including CPI for 36 large and mid-sized cities; see the API documentation for the specific indicators. History from 1993, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATACPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataPPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China industrial price index data such as PPI, PPI by industry and PPIRM; see the API documentation for the specific indicators. History from 1993, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATAPPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataIndustry(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China industrial data such as industrial production and key economic indicators of industrial enterprises; see the API documentation for the specific indicators. History from 1993, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATAINDUSTRY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataRetailSales(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China retail sales of consumer goods data; see the API documentation for the specific indicators. History from 1984, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATARETAILSALES%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataResidentIncomeExp(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China urban and rural household income and expenditure data; see the API documentation for the specific indicators. History from 1984, updated quarterly.
"""
code, result = self.client.getData(vs.CHINADATARESIDENTINCOMEEXP%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataFAI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China fixed-asset investment data such as investment amount, funding sources and investment by industry; see the API documentation for the specific indicators. History from 1990, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATAFAI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataRealEstate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China real estate development data such as the real estate climate index, investment, funding sources, commodity housing sales and land development and purchases; see the API documentation for the specific indicators. History from 1991, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATAREALESTATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China import and export data; see the API documentation for the specific indicators. History from 1990, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATAFOREIGNTRADE%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataFDI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China foreign direct investment (FDI) data; see the API documentation for the specific indicators. History from 1999, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATAFDI%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataMoneyStatistics(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China monetary statistics such as money supply and gold and foreign exchange reserves; see the API documentation for the specific indicators. History from 1951, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATAMONEYSTATISTICS%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataAllSystemFinancing(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China aggregate financing to the real economy (total social financing) data; see the API documentation for the specific indicators. History from 2002, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATAALLSYSTEMFINANCING%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataLendingDeposit(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Deposit and loan data of China's financial institutions; see the API documentation for the specific indicators. History from 1978, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATALENDINGDEPOSIT%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataCreditFundsTable(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Credit funds balance sheet data of China's financial institutions; see the API documentation for the specific indicators. History from 1952, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATACREDITFUNDSTABLE%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataOpenMarketOperation(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        People's Bank of China open market repo data, such as repos and reverse repos; see the API documentation for the specific indicators. History from 1952, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATAOPENMARKETOPERATION%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataExchangeRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        RMB exchange rate data such as the central parity rate and RMB exchange rate indices; see the API documentation for the specific indicators. History from January 1994, updated daily.
"""
code, result = self.client.getData(vs.CHINADATAEXCHANGERATE%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataInterestRateLendingDeposit(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Officially published China deposit and lending interest rates; see the API documentation for the specific indicators. History from August 1949, updated daily.
"""
code, result = self.client.getData(vs.CHINADATAINTERESTRATELENDINGDEPOSIT%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataInterestRateSHIBOR(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China interbank offered rate (Shibor) data; see the API documentation for the specific indicators. History from October 2006, updated daily.
"""
code, result = self.client.getData(vs.CHINADATAINTERESTRATESHIBOR%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataInterestRateInterbankRepo(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China interbank repo data such as pledged and outright repos; see the API documentation for the specific indicators. History from January 2005, updated daily.
"""
code, result = self.client.getData(vs.CHINADATAINTERESTRATEINTERBANKREPO%(indicID, indicName,
beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataFinance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        China fiscal data such as national fiscal revenue and expenditure and fiscal revenue by province and municipality; see the API documentation for the specific indicators. History from 1990, updated monthly.
"""
code, result = self.client.getData(vs.CHINADATAFINANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ChinaDataGoldClosePrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Shanghai gold closing price data; see the API documentation for the specific indicators. History from September 2004, updated daily.
"""
code, result = self.client.getData(vs.CHINADATAGOLDCLOSEPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def USDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        US GDP data; see the API documentation for the specific indicators. History from 1947, updated quarterly.
"""
code, result = self.client.getData(vs.USDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def USDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        US foreign trade data; see the API documentation for the specific indicators. History from 1992, updated monthly.
"""
code, result = self.client.getData(vs.USDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def USDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        US price index data such as CPI, PPI and import/export price indices; see the API documentation for the specific indicators. History from 1913, updated monthly.
"""
code, result = self.client.getData(vs.USDATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def USDataEmploymentUnemployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        US employment and unemployment data such as nonfarm payrolls and the ADP employment report; see the API documentation for the specific indicators. History from 1939, updated monthly.
"""
code, result = self.client.getData(vs.USDATAEMPLOYMENTUNEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def USDataInterestRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        US interest rate data such as the federal funds rate, Treasury yields and USD Libor; see the API documentation for the specific indicators. History from July 1954, updated daily.
"""
code, result = self.client.getData(vs.USDATAINTERESTRATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def USDataExchangeRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        US exchange rate data such as the dollar against major currencies and the dollar index; see the API documentation for the specific indicators. History from January 1973, updated daily.
"""
code, result = self.client.getData(vs.USDATAEXCHANGERATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def USDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        US money supply data; see the API documentation for the specific indicators. History from 1959, updated monthly.
"""
code, result = self.client.getData(vs.USDATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def USDataConsumerCredit(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        US consumer credit data; see the API documentation for the specific indicators. History from 1943, updated monthly.
"""
code, result = self.client.getData(vs.USDATACONSUMERCREDIT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def USDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        US climate index data such as PMI, consumer confidence indices and the ECRI leading index; see the API documentation for the specific indicators. History from 1948, updated monthly.
"""
code, result = self.client.getData(vs.USDATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def USDataDurableGoods(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        US durable goods data; see the API documentation for the specific indicators. History from 1992, updated monthly.
"""
code, result = self.client.getData(vs.USDATADURABLEGOODS%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def USDataRealEstate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        US real estate data; see the API documentation for the specific indicators. History from 1959, updated monthly.
"""
code, result = self.client.getData(vs.USDATAREALESTATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def USDataDomesticTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        US domestic trade data; see the API documentation for the specific indicators. History from 1992, updated monthly.
"""
code, result = self.client.getData(vs.USDATADOMESTICTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EUDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        EU GDP data; see the API documentation for the specific indicators. History from 1995, updated quarterly.
"""
code, result = self.client.getData(vs.EUDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EUDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        EU foreign trade data; see the API documentation for the specific indicators. History from 2013, updated monthly.
"""
code, result = self.client.getData(vs.EUDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EUDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        EU price index data such as CPI, PPI and unit import/export price indices; see the API documentation for the specific indicators. History from 1996, updated monthly.
"""
code, result = self.client.getData(vs.EUDATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EUDataEmploymentUnemployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        EU employment and unemployment data such as the unemployment rate and the labour cost index; see the API documentation for the specific indicators. History from 1993, updated monthly.
"""
code, result = self.client.getData(vs.EUDATAEMPLOYMENTUNEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EUDataInterestRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        EU interest rate data such as Libor, government bond yields and euro-area public bond yields; see the API documentation for the specific indicators. History from January 1980, updated daily.
"""
code, result = self.client.getData(vs.EUDATAINTERESTRATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EUDataExchangeRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        EU exchange rate data; see the API documentation for the specific indicators. History from January 1999, updated daily.
"""
code, result = self.client.getData(vs.EUDATAEXCHANGERATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EUDataBanking(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        EU financial data such as money supply and official reserve assets; see the API documentation for the specific indicators. History from 1980, updated monthly.
"""
code, result = self.client.getData(vs.EUDATABANKING%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EUDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        EU climate index data such as PMI, the economic sentiment indicator and consumer confidence indices; see the API documentation for the specific indicators. History from 1999, updated monthly.
"""
code, result = self.client.getData(vs.EUDATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EUDataIndustry(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        EU industrial data such as construction output and production indices; see the API documentation for the specific indicators. History from 2013, updated monthly.
"""
code, result = self.client.getData(vs.EUDATAINDUSTRY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EUDataRetail(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        EU retail sales data; see the API documentation for the specific indicators. History from 2013, updated monthly.
"""
code, result = self.client.getData(vs.EUDATARETAIL%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SwitzerlandDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Switzerland GDP data; see the API documentation for the specific indicators. History from 1980, updated quarterly.
"""
code, result = self.client.getData(vs.SWITZERLANDDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SwitzerlandDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Switzerland price index data such as CPI and PPI; see the API documentation for the specific indicators. History from 1922, updated monthly.
"""
code, result = self.client.getData(vs.SWITZERLANDDATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SwitzerlandDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Switzerland SVME purchasing managers' index data; see the API documentation for the specific indicators. History from 2007, updated monthly.
"""
code, result = self.client.getData(vs.SWITZERLANDDATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SwitzerlandDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Switzerland money supply data; see the API documentation for the specific indicators. History from 1975, updated monthly.
"""
code, result = self.client.getData(vs.SWITZERLANDDATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SwedenDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Sweden GDP data; see the API documentation for the specific indicators. History from 1993, updated quarterly.
"""
code, result = self.client.getData(vs.SWEDENDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SwedenDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Sweden price index data such as CPI, PPI and import/export price indices; see the API documentation for the specific indicators. History from 1980, updated monthly.
"""
code, result = self.client.getData(vs.SWEDENDATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SwedenDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Sweden foreign trade data; see the API documentation for the specific indicators. History from 1975, updated monthly.
"""
code, result = self.client.getData(vs.SWEDENDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def KoreaDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        South Korea GDP data; see the API documentation for the specific indicators. History from 1970, updated quarterly.
"""
code, result = self.client.getData(vs.KOREADATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def KoreaDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        South Korea price index data; see the API documentation for the specific indicators. History from 1965, updated monthly.
"""
code, result = self.client.getData(vs.KOREADATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def KoreaDataEmploymentUnemployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        South Korea employment and unemployment data; see the API documentation for the specific indicators. History from 1999, updated monthly.
"""
code, result = self.client.getData(vs.KOREADATAEMPLOYMENTUNEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def KoreaDataInterestRates(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        South Korea interest rate data; see the API documentation for the specific indicators. History from January 1995, updated daily.
"""
code, result = self.client.getData(vs.KOREADATAINTERESTRATES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def KoreaDataExchangeRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        South Korea exchange rate data; see the API documentation for the specific indicators. History from May 1964, updated daily.
"""
code, result = self.client.getData(vs.KOREADATAEXCHANGERATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def KoreaDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        South Korea money supply data; see the API documentation for the specific indicators. History from 1970, updated monthly.
"""
code, result = self.client.getData(vs.KOREADATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def KoreaDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        South Korea climate index data such as the business survey index, consumer survey index, PMI and consumer confidence index; see the API documentation for the specific indicators. History from 2008, updated monthly.
"""
code, result = self.client.getData(vs.KOREADATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def KoreaData_ExternalDebt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        South Korea external debt data; see the API documentation for the specific indicators. History from 1994, updated quarterly.
"""
code, result = self.client.getData(vs.KOREADATA_EXTERNALDEBT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def KoreaDataIndustryandService(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        South Korea industry and services data such as industrial, manufacturing and service activity indices; see the API documentation for the specific indicators. History from 1970, updated monthly.
"""
code, result = self.client.getData(vs.KOREADATAINDUSTRYANDSERVICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def KoreaDataRealEstate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        South Korea real estate data; see the API documentation for the specific indicators. History from 1987, updated monthly.
"""
code, result = self.client.getData(vs.KOREADATAREALESTATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def AustraliaDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Australia GDP data; see the API documentation for the specific indicators. History from 1959, updated quarterly.
"""
code, result = self.client.getData(vs.AUSTRALIADATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def AustraliaDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Australia trade data such as foreign trade and retail sales; see the API documentation for the specific indicators. History from 1971, updated monthly.
"""
code, result = self.client.getData(vs.AUSTRALIADATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def AustraliaDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Australia price index data such as the consumer price index (CPI) and producer price index (PPI); see the API documentation for the specific indicators. History from 1948, updated quarterly.
"""
code, result = self.client.getData(vs.AUSTRALIADATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def AustraliaDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Australia employment and wage data such as employment and ANZ total job advertisements; see the API documentation for the specific indicators. History from 1978, updated monthly.
"""
code, result = self.client.getData(vs.AUSTRALIADATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def AustraliaDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Australia business survey data such as the consumer confidence index, PMI, PSC and PCI; see the API documentation for the specific indicators. History from 2002, updated monthly.
"""
code, result = self.client.getData(vs.AUSTRALIADATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ItalyDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Italy gross domestic product (GDP) data; see the API documentation for the specific indicators. History from 1992, updated quarterly.
"""
code, result = self.client.getData(vs.ITALYDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ItalyDataPaymentsBalance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Italy balance of payments data; see the API documentation for the specific indicators. History from 1970, updated quarterly.
"""
code, result = self.client.getData(vs.ITALYDATAPAYMENTSBALANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ItalyDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Italy price index data; see the API documentation for the specific indicators. History from 1996, updated monthly.
"""
code, result = self.client.getData(vs.ITALYDATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ItalyDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Italy employment and wage data; see the API documentation for the specific indicators. History from 1983, updated monthly.
"""
code, result = self.client.getData(vs.ITALYDATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ItalyDataFinance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Italy fiscal data; see the API documentation for the specific indicators. History from 1995, updated annually.
"""
code, result = self.client.getData(vs.ITALYDATAFINANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ItalyDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Italy business survey data; see the API documentation for the specific indicators. History from 1985, updated monthly.
"""
code, result = self.client.getData(vs.ITALYDATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ItalyDataInterestRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Italy interest rate data; see the API documentation for the specific indicators. History from 1980, updated monthly.
"""
code, result = self.client.getData(vs.ITALYDATAINTERESTRATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SpainDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Spain GDP data; see the API documentation for the specific indicators. History from 1960, updated quarterly.
"""
code, result = self.client.getData(vs.SPAINDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SpainDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Spain foreign trade data; see the API documentation for the specific indicators. History from 1960, updated annually.
"""
code, result = self.client.getData(vs.SPAINDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SpainDataPaymentsBalance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Spain balance of payments data; see the API documentation for the specific indicators. History from 1980, updated annually.
"""
code, result = self.client.getData(vs.SPAINDATAPAYMENTSBALANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SpainDataBanking(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Spain financial data such as money supply and banking; see the API documentation for the specific indicators. History from 1960, updated annually.
"""
code, result = self.client.getData(vs.SPAINDATABANKING%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SpainDataTransportation(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Spain transportation and telecommunications data; see the API documentation for the specific indicators. History from 1960, updated annually.
"""
code, result = self.client.getData(vs.SPAINDATATRANSPORTATION%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SpainDataEnergy(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Spain energy data; see the API documentation for the specific indicators. History from 1960, updated annually.
"""
code, result = self.client.getData(vs.SPAINDATAENERGY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SpainDataFinance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Spain fiscal data; see the API documentation for the specific indicators. History from 1995, updated annually.
"""
code, result = self.client.getData(vs.SPAINDATAFINANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CanadaDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Canada GDP data; see the API documentation for the specific indicators. History from 1962, updated quarterly.
"""
code, result = self.client.getData(vs.CANADADATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CanadaDataPaymentsBalance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Canada balance of payments data; see the API documentation for the specific indicators. History from 2012, updated monthly.
"""
code, result = self.client.getData(vs.CANADADATAPAYMENTSBALANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CanadaDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
        Canada foreign trade data; see the API documentation for the specific indicators. History from 2008, updated monthly.
"""
code, result = self.client.getData(vs.CANADADATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CanadaDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Canadian price-index data, such as the CPI and the producer price index; see the API documentation for specific indicators. Historical data start in 1981, updated monthly.
"""
code, result = self.client.getData(vs.CANADADATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CanadaDataBanking(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Canadian financial data, such as money supply and international reserves; see the API documentation for specific indicators. Historical data start in 2002, updated monthly.
"""
code, result = self.client.getData(vs.CANADADATABANKING%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CanadaDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Canadian employment and wage data, such as employment and average weekly earnings; see the API documentation for specific indicators. Historical data start in 1991, updated monthly.
"""
code, result = self.client.getData(vs.CANADADATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CanadaDataManufacturing(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Canadian industry and manufacturing data; see the API documentation for specific indicators. Historical data start in 2008, updated monthly.
"""
code, result = self.client.getData(vs.CANADADATAMANUFACTURING%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CanadaDataRealEstate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Canadian real-estate data, such as the new housing price index and building permits; see the API documentation for specific indicators. Historical data start in 2003, updated monthly.
"""
code, result = self.client.getData(vs.CANADADATAREALESTATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CanadaDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Canadian business-climate survey data, such as the Markit PMI and the Ivey PMI; see the API documentation for specific indicators. Historical data start in 2011, updated monthly.
"""
code, result = self.client.getData(vs.CANADADATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong GDP data; see the API documentation for specific indicators. Historical data start in 1973, updated quarterly.
"""
code, result = self.client.getData(vs.HKDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong foreign trade and investment data; see the API documentation for specific indicators. Historical data start in 1952, updated monthly.
"""
code, result = self.client.getData(vs.HKDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong price-index data, such as the CPI and PPI; see the API documentation for specific indicators. Historical data start in 1974, updated monthly.
"""
code, result = self.client.getData(vs.HKDATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataFinance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong fiscal data; see the API documentation for specific indicators. Historical data start in 2007, updated monthly.
"""
code, result = self.client.getData(vs.HKDATAFINANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataBanking(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong financial data, such as the monetary and financial overview and currency in circulation; see the API documentation for specific indicators. Historical data start in 1968, updated monthly.
"""
code, result = self.client.getData(vs.HKDATABANKING%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataIndustry(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong industrial data; see the API documentation for specific indicators. Historical data start in 2009, updated quarterly.
"""
code, result = self.client.getData(vs.HKDATAINDUSTRY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataConsumption(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong total retail sales data; see the API documentation for specific indicators. Historical data start in 2001, updated monthly.
"""
code, result = self.client.getData(vs.HKDATACONSUMPTION%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataThroughput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong cargo-throughput data; see the API documentation for specific indicators. Historical data start in 2010, updated quarterly.
"""
code, result = self.client.getData(vs.HKDATATHROUGHPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong population and employment data; see the API documentation for specific indicators. Historical data start in 1981, updated monthly.
"""
code, result = self.client.getData(vs.HKDATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataInterestRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong interest-rate data, such as interest rates and interbank offered rates; see the API documentation for specific indicators. Historical data start in January 1980, updated daily.
"""
code, result = self.client.getData(vs.HKDATAINTERESTRATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataExchangeRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong exchange-rate data; see the API documentation for specific indicators. Historical data start in July 2007, updated daily.
"""
code, result = self.client.getData(vs.HKDATAEXCHANGERATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataRealEstate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong real-estate data; see the API documentation for specific indicators. Historical data start in 2000, updated monthly.
"""
code, result = self.client.getData(vs.HKDATAREALESTATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HKDataTourism(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Hong Kong tourism data, such as visitor arrivals and hotel occupancy; see the API documentation for specific indicators. Historical data start in 2010, updated monthly.
"""
code, result = self.client.getData(vs.HKDATATOURISM%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
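# Note (sketch): the Hong Kong interest-rate and exchange-rate series above are
# daily, so callers usually pass an explicit beginDate/endDate window to keep
# responses small. For example, assuming an instance `md`:
#
#     hibor = md.HKDataInterestRate(beginDate='20150101', endDate='20150630')
#
# The date-string format shown here is an assumption; consult the API
# documentation for the accepted formats.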
def IndiaDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indian GDP data; see the API documentation for specific indicators. Historical data start in 2009, updated quarterly.
"""
code, result = self.client.getData(vs.INDIADATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndiaDataPaymentsBalance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indian balance-of-payments data, such as external debt and the international investment position; see the API documentation for specific indicators. Historical data start in 2006, updated quarterly.
"""
code, result = self.client.getData(vs.INDIADATAPAYMENTSBALANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndiaDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indian price-index data, such as the CPI, wholesale price index and house price index; see the API documentation for specific indicators. Historical data start in 1995, updated monthly.
"""
code, result = self.client.getData(vs.INDIADATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndiaDataTourism(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indian tourism data; see the API documentation for specific indicators. Historical data start in 2005, updated monthly.
"""
code, result = self.client.getData(vs.INDIADATATOURISM%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndiaDataEnergy(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indian energy data; see the API documentation for specific indicators. Historical data start in 2007, updated monthly.
"""
code, result = self.client.getData(vs.INDIADATAENERGY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndiaDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indian business-climate survey data; see the API documentation for specific indicators. Historical data start in 2012, updated monthly.
"""
code, result = self.client.getData(vs.INDIADATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndiaDataBanking(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indian financial data, such as money supply and foreign trade; see the API documentation for specific indicators. Historical data start in 2011, updated weekly.
"""
code, result = self.client.getData(vs.INDIADATABANKING%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndiaDataIndustry(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indian industrial data; see the API documentation for specific indicators. Historical data start in 1981, updated monthly.
"""
code, result = self.client.getData(vs.INDIADATAINDUSTRY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndiaDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indian foreign-trade data; see the API documentation for specific indicators. Historical data start in 1994, updated monthly.
"""
code, result = self.client.getData(vs.INDIADATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MalaysiaDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Malaysian GDP data; see the API documentation for specific indicators. Historical data start in 2007, updated quarterly.
"""
code, result = self.client.getData(vs.MALAYSIADATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MalaysiaDataPaymentsBalance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Malaysian balance-of-payments data, such as the balance of payments and foreign-exchange reserves; see the API documentation for specific indicators. Historical data start in 1998, updated quarterly.
"""
code, result = self.client.getData(vs.MALAYSIADATAPAYMENTSBALANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MalaysiaDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Malaysian foreign-trade data; see the API documentation for specific indicators. Historical data start in 1996, updated monthly.
"""
code, result = self.client.getData(vs.MALAYSIADATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MalaysiaDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Malaysian price-index data, such as the PPI and the consumer price index; see the API documentation for specific indicators. Historical data start in 1997, updated monthly.
"""
code, result = self.client.getData(vs.MALAYSIADATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MalaysiaDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Malaysian employment data; see the API documentation for specific indicators. Historical data start in 2010, updated monthly.
"""
code, result = self.client.getData(vs.MALAYSIADATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MalaysiaDataIndustry(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Malaysian industrial data; see the API documentation for specific indicators. Historical data start in 2007, updated monthly.
"""
code, result = self.client.getData(vs.MALAYSIADATAINDUSTRY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MalaysiaDataFinance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Malaysian fiscal data; see the API documentation for specific indicators. Historical data start in 1996, updated quarterly.
"""
code, result = self.client.getData(vs.MALAYSIADATAFINANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MalaysiaDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Malaysian money-supply data; see the API documentation for specific indicators. Historical data start in 1997, updated monthly.
"""
code, result = self.client.getData(vs.MALAYSIADATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MalaysiaDataRealEstate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Malaysian real-estate data; see the API documentation for specific indicators. Historical data start in 2004, updated quarterly.
"""
code, result = self.client.getData(vs.MALAYSIADATAREALESTATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndonesiaDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indonesian GDP data; see the API documentation for specific indicators. Historical data start in 2001, updated quarterly.
"""
code, result = self.client.getData(vs.INDONESIADATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndonesiaDataPaymentsBalance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indonesian balance-of-payments and external-debt data; see the API documentation for specific indicators. Historical data start in 1996, updated quarterly.
"""
code, result = self.client.getData(vs.INDONESIADATAPAYMENTSBALANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndonesiaDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indonesian foreign-trade data; see the API documentation for specific indicators. Historical data start in 1999, updated monthly.
"""
code, result = self.client.getData(vs.INDONESIADATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndonesiaDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indonesian consumer price index (CPI) data; see the API documentation for specific indicators. Historical data start in 2000, updated monthly.
"""
code, result = self.client.getData(vs.INDONESIADATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndonesiaDataIndustry(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indonesian industrial data; see the API documentation for specific indicators. Historical data start in 2003, updated annually.
"""
code, result = self.client.getData(vs.INDONESIADATAINDUSTRY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndonesiaDataFinance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indonesian fiscal data; see the API documentation for specific indicators. Historical data start in 2003, updated quarterly.
"""
code, result = self.client.getData(vs.INDONESIADATAFINANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndonesiaDataBanking(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indonesian money-supply data; see the API documentation for specific indicators. Historical data start in 1989, updated monthly.
"""
code, result = self.client.getData(vs.INDONESIADATABANKING%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndonesiaDataSecurity(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indonesian securities-market data, such as stocks and bonds issued on the capital market and the government's outstanding portfolio; see the API documentation for specific indicators. Historical data start in 2002, updated monthly.
"""
code, result = self.client.getData(vs.INDONESIADATASECURITY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def IndonesiaDataTourism(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Indonesian tourism data; see the API documentation for specific indicators. Historical data start in 2008, updated monthly.
"""
code, result = self.client.getData(vs.INDONESIADATATOURISM%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TurkeyDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Turkish GDP data; see the API documentation for specific indicators. Historical data start in 1998, updated quarterly.
"""
code, result = self.client.getData(vs.TURKEYDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TurkeyDataPaymentsBalance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Turkish balance-of-payments data, such as the balance of payments, foreign-exchange reserves and external debt; see the API documentation for specific indicators. Historical data start in 1981, updated monthly.
"""
code, result = self.client.getData(vs.TURKEYDATAPAYMENTSBALANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TurkeyDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Turkish foreign-trade data; see the API documentation for specific indicators. Historical data start in 2005, updated monthly.
"""
code, result = self.client.getData(vs.TURKEYDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TurkeyDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Turkish price-index data, such as the CPI and PPI; see the API documentation for specific indicators. Historical data start in 2004, updated monthly.
"""
code, result = self.client.getData(vs.TURKEYDATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TurkeyDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Turkish employment data; see the API documentation for specific indicators. Historical data start in 2012, updated monthly.
"""
code, result = self.client.getData(vs.TURKEYDATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TurkeyDataIndustry(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Turkish industrial data; see the API documentation for specific indicators. Historical data start in 2012, updated monthly.
"""
code, result = self.client.getData(vs.TURKEYDATAINDUSTRY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TurkeyDataFinance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Turkish fiscal data; see the API documentation for specific indicators. Historical data start in 2006, updated monthly.
"""
code, result = self.client.getData(vs.TURKEYDATAFINANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TurkeyDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Turkish money-supply data; see the API documentation for specific indicators. Historical data start in 2005, updated weekly.
"""
code, result = self.client.getData(vs.TURKEYDATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ThailandDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Thai GDP data; see the API documentation for specific indicators. Historical data start in 1993, updated quarterly.
"""
code, result = self.client.getData(vs.THAILANDDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ThailandDataPaymentsBalance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Thai balance-of-payments data, such as the balance of payments, foreign-exchange reserves and external debt; see the API documentation for specific indicators. Historical data start in 1987, updated monthly.
"""
code, result = self.client.getData(vs.THAILANDDATAPAYMENTSBALANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ThailandDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Thai foreign-trade data; see the API documentation for specific indicators. Historical data start in 2010, updated monthly.
"""
code, result = self.client.getData(vs.THAILANDDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ThailandDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Thai price-index data, such as the CPI and PPI; see the API documentation for specific indicators. Historical data start in 1976, updated monthly.
"""
code, result = self.client.getData(vs.THAILANDDATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ThailandDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Thai employment data, such as employment, the labour index and the labour-productivity index; see the API documentation for specific indicators. Historical data start in 2000, updated monthly.
"""
code, result = self.client.getData(vs.THAILANDDATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ThailandDataIndustry(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Thai industrial data, such as the industrial production index, capacity utilisation, shipment index and finished-goods inventory index; see the API documentation for specific indicators. Historical data start in 2000, updated monthly.
"""
code, result = self.client.getData(vs.THAILANDDATAINDUSTRY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ThailandDataFinance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Thai fiscal data; see the API documentation for specific indicators. Historical data start in 2002, updated monthly.
"""
code, result = self.client.getData(vs.THAILANDDATAFINANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ThailandDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Thai business-sentiment index data; see the API documentation for specific indicators. Historical data start in 1999, updated monthly.
"""
code, result = self.client.getData(vs.THAILANDDATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ThailandDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Thai money-supply data; see the API documentation for specific indicators. Historical data start in 1997, updated monthly.
"""
code, result = self.client.getData(vs.THAILANDDATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK GDP data; see the API documentation for specific indicators. Historical data start in 1955, updated quarterly.
"""
code, result = self.client.getData(vs.UKDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK foreign-trade data; see the API documentation for specific indicators. Historical data start in 1971, updated monthly.
"""
code, result = self.client.getData(vs.UKDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataCPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK consumer price index (CPI) data; see the API documentation for specific indicators. Historical data start in 1988, updated monthly.
"""
code, result = self.client.getData(vs.UKDATACPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataRPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK retail price index (RPI) data; see the API documentation for specific indicators. Historical data start in 1948, updated monthly.
"""
code, result = self.client.getData(vs.UKDATARPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK employment data; see the API documentation for specific indicators. Historical data start in 1971, updated monthly.
"""
code, result = self.client.getData(vs.UKDATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK money-supply data; see the API documentation for specific indicators. Historical data start in 1998, updated monthly.
"""
code, result = self.client.getData(vs.UKDATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataConsumerCredit(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK consumer-credit data; see the API documentation for specific indicators. Historical data start in 1997, updated monthly.
"""
code, result = self.client.getData(vs.UKDATACONSUMERCREDIT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK business-climate index data; see the API documentation for specific indicators. Historical data start in 1981, updated monthly.
"""
code, result = self.client.getData(vs.UKDATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataFinance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK fiscal revenue and expenditure data; see the API documentation for specific indicators. Historical data start in 1991, updated monthly.
"""
code, result = self.client.getData(vs.UKDATAFINANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataIndustrialPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK industrial production index data; see the API documentation for specific indicators. Historical data start in 1968, updated monthly.
"""
code, result = self.client.getData(vs.UKDATAINDUSTRIALPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataHousePI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK house price index data; see the API documentation for specific indicators. Historical data start in 1983, updated monthly.
"""
code, result = self.client.getData(vs.UKDATAHOUSEPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataInterestRates(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK interest-rate data; see the API documentation for specific indicators. Historical data start in January 1975, updated daily.
"""
code, result = self.client.getData(vs.UKDATAINTERESTRATES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UKDataExchangeRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains UK exchange-rate data; see the API documentation for specific indicators. Historical data start in January 1975, updated daily.
"""
code, result = self.client.getData(vs.UKDATAEXCHANGERATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def JapanDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Japanese GDP data; see the API documentation for specific indicators. Historical data start in 1980, updated quarterly.
"""
code, result = self.client.getData(vs.JAPANDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def JapanDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Japanese foreign-trade data; see the API documentation for specific indicators. Historical data start in 1979, updated monthly.
"""
code, result = self.client.getData(vs.JAPANDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def JapanDataCPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Japanese consumer price index (CPI) data; see the API documentation for specific indicators. Historical data start in 1970, updated monthly.
"""
code, result = self.client.getData(vs.JAPANDATACPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def JapanDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Japanese employment and unemployment data; see the API documentation for specific indicators. Historical data start in 1953, updated monthly.
"""
code, result = self.client.getData(vs.JAPANDATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def JapanDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Japanese money-supply data; see the API documentation for specific indicators. Historical data start in 2007, updated monthly.
"""
code, result = self.client.getData(vs.JAPANDATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def JapanDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Japanese business-climate index data, such as the indexes of business conditions, the consumer confidence index and the PMI; see the API documentation for specific indicators. Historical data start in 1980, updated monthly.
"""
code, result = self.client.getData(vs.JAPANDATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def JapanDataIndustrialPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Japanese industrial production index data; see the API documentation for specific indicators. Historical data start in 2004, updated monthly.
"""
code, result = self.client.getData(vs.JAPANDATAINDUSTRIALPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def JapanDataHousePI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Japanese house price index data; see the API documentation for specific indicators. Historical data start in 2004, updated monthly.
"""
code, result = self.client.getData(vs.JAPANDATAHOUSEPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def JapanDataInterestRates(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Japanese interest-rate data; see the API documentation for specific indicators. Historical data start in January 1998, updated daily.
"""
code, result = self.client.getData(vs.JAPANDATAINTERESTRATES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def JapanDataExchangeRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Japanese exchange-rate data; see the API documentation for specific indicators. Historical data start in January 1998, updated daily.
"""
code, result = self.client.getData(vs.JAPANDATAEXCHANGERATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
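# Note (sketch): every wrapper also accepts a `field` argument that is
# interpolated into the request URL; it can presumably be used to restrict the
# returned columns, while indicID/indicName select a particular indicator.
# A hypothetical call, assuming an instance `md` and hypothetical column
# names (these are not guaranteed by this module):
#
#     cpi = md.JapanDataCPI(indicName='CPI', field='periodDate,dataValue')
#
# The valid indicator names and field names are defined by the API
# documentation, not here.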
def GermanyDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German GDP data; see the API documentation for specific indicators. Historical data start in 1991, updated quarterly.
"""
code, result = self.client.getData(vs.GERMANYDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def GermanyDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German foreign-trade data; see the API documentation for specific indicators. Historical data start in 2002, updated monthly.
"""
code, result = self.client.getData(vs.GERMANYDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def GermanyDataCPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German consumer price index (CPI) data; see the API documentation for specific indicators. Historical data start in 1994, updated monthly.
"""
code, result = self.client.getData(vs.GERMANYDATACPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def GermanyDataPPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German producer price index (PPI) data; see the API documentation for specific indicators. Historical data start in 2005, updated monthly.
"""
code, result = self.client.getData(vs.GERMANYDATAPPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def GermanyDataImportExportPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German import and export price index data; see the API documentation for specific indicators. Historical data start in 2000, updated monthly.
"""
code, result = self.client.getData(vs.GERMANYDATAIMPORTEXPORTPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def GermanyDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German employment and unemployment data; see the API documentation for specific indicators. Historical data start in 2000, updated monthly.
"""
code, result = self.client.getData(vs.GERMANYDATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def GermanyDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German money-supply data; see the API documentation for specific indicators. Historical data start in 1995, updated monthly.
"""
code, result = self.client.getData(vs.GERMANYDATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def GermanyDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German business-climate index data, such as the business climate index, the ZEW sentiment index, the PMI and the consumer confidence index; see the API documentation for specific indicators. Historical data start in 1985, updated monthly.
"""
code, result = self.client.getData(vs.GERMANYDATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def GermanyDataFinance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German fiscal revenue and expenditure data; see the API documentation for specific indicators. Historical data start in 2004, updated monthly.
"""
code, result = self.client.getData(vs.GERMANYDATAFINANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def GermanyDataIndustrialPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German industrial production index data; see the API documentation for specific indicators. Historical data start in 1991, updated monthly.
"""
code, result = self.client.getData(vs.GERMANYDATAINDUSTRIALPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def GermanyDataRealEstate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German real-estate data; see the API documentation for specific indicators. Historical data start in 2003, updated monthly.
"""
code, result = self.client.getData(vs.GERMANYDATAREALESTATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def GermanyDataDomesticTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German domestic-trade data; see the API documentation for specific indicators. Historical data start in 1994, updated monthly.
"""
code, result = self.client.getData(vs.GERMANYDATADOMESTICTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def GermanyDataInterestRates(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains German interest-rate data; see the API documentation for specific indicators. Historical data start in August 1997, updated daily.
"""
code, result = self.client.getData(vs.GERMANYDATAINTERESTRATES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FranceDataFinance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains French fiscal revenue and expenditure data; see the API documentation for specific indicators. Historical data start in 1995, updated annually.
"""
code, result = self.client.getData(vs.FRANCEDATAFINANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FranceDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains French GDP data; see the API documentation for specific indicators. Historical data start in 1978, updated quarterly.
"""
code, result = self.client.getData(vs.FRANCEDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FranceDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains French foreign-trade data; see the API documentation for specific indicators. Historical data start in 1990, updated monthly.
"""
code, result = self.client.getData(vs.FRANCEDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FranceDataCPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains French consumer price index (CPI) data; see the API documentation for specific indicators. Historical data start in 1996, updated monthly.
"""
code, result = self.client.getData(vs.FRANCEDATACPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FranceDataPPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains French producer price index (PPI) data; see the API documentation for specific indicators. Historical data start in 1999, updated monthly.
"""
code, result = self.client.getData(vs.FRANCEDATAPPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FranceDataImportPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains French import price index data; see the API documentation for specific indicators. Historical data start in 2005, updated monthly.
"""
code, result = self.client.getData(vs.FRANCEDATAIMPORTPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FranceDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains French employment data; see the API documentation for specific indicators. Historical data start in 1983, updated monthly.
"""
code, result = self.client.getData(vs.FRANCEDATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FranceDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains French money-supply data; see the API documentation for specific indicators. Historical data start in 2007, updated monthly.
"""
code, result = self.client.getData(vs.FRANCEDATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FranceDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains French business-climate index data; see the API documentation for specific indicators. Historical data start in 1985, updated monthly.
"""
code, result = self.client.getData(vs.FRANCEDATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FranceDataIndustrialPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains French industrial production index data; see the API documentation for specific indicators. Historical data start in 1990, updated monthly.
"""
code, result = self.client.getData(vs.FRANCEDATAINDUSTRIALPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FranceDataDomesticTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains French domestic-trade data; see the API documentation for specific indicators. Historical data start in 1995, updated monthly.
"""
code, result = self.client.getData(vs.FRANCEDATADOMESTICTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FranceDataInterestRates(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains French interest-rate data; see the API documentation for specific indicators. Historical data start in January 1980, updated daily.
"""
code, result = self.client.getData(vs.FRANCEDATAINTERESTRATES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan GDP data; see the API documentation for specific indicators. Historical data start in 1961, updated quarterly.
"""
code, result = self.client.getData(vs.TAIWANDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataExternalDebt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan external-debt data; see the API documentation for specific indicators. Historical data start in 1999, updated quarterly.
"""
code, result = self.client.getData(vs.TAIWANDATAEXTERNALDEBT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan foreign-trade data; see the API documentation for specific indicators. Historical data start in 1980, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataCPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan consumer price index (CPI) data; see the API documentation for specific indicators. Historical data start in 1981, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATACPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataImportExportPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan import and export price index data; see the API documentation for specific indicators. Historical data start in 1981, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATAIMPORTEXPORTPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan employment data; see the API documentation for specific indicators. Historical data start in 1978, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan money-supply data; see the API documentation for specific indicators. Historical data start in 1961, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataLendingDeposit(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan deposit and lending data; see the API documentation for specific indicators. Historical data start in 1997, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATALENDINGDEPOSIT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataReserveFund(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan reserve data; see the API documentation for specific indicators. Historical data start in 1998, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATARESERVEFUND%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan business-climate index data; see the API documentation for specific indicators. Historical data start in 1982, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataFinance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan fiscal revenue and expenditure data; see the API documentation for specific indicators. Historical data start in 1961, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATAFINANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataIndustrialPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan industrial production index data; see the API documentation for specific indicators. Historical data start in 1971, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATAINDUSTRIALPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataRealEstate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan real-estate data; see the API documentation for specific indicators. Historical data start in 1991, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATAREALESTATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataTourism(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan tourism data; see the API documentation for specific indicators. Historical data start in 1999, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATATOURISM%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataCrossStraitTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan cross-strait trade data; see the API documentation for specific indicators. Historical data start in 1997, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATACROSSSTRAITTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataBusinessandEconomy(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan business and economy data; see the API documentation for specific indicators. Historical data start in 1999, updated monthly.
"""
code, result = self.client.getData(vs.TAIWANDATABUSINESSANDECONOMY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataInterestRates(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan interest-rate data; see the API documentation for specific indicators. Historical data start in May 2002, updated daily.
"""
code, result = self.client.getData(vs.TAIWANDATAINTERESTRATES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TaiwanDataExchangeRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Taiwan exchange-rate data; see the API documentation for specific indicators. Historical data start in January 1993, updated daily.
"""
code, result = self.client.getData(vs.TAIWANDATAEXCHANGERATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MacaoDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Macao GDP data; see the API documentation for specific indicators. Historical data start in 2000, updated quarterly.
"""
code, result = self.client.getData(vs.MACAODATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MacaoDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Macao price-index data; see the API documentation for specific indicators. Historical data start in 1998, updated monthly.
"""
code, result = self.client.getData(vs.MACAODATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MacaoDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Macao employment data; see the API documentation for specific indicators. Historical data start in 1996, updated monthly.
"""
code, result = self.client.getData(vs.MACAODATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MacaoDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Macao money-supply data; see the API documentation for specific indicators. Historical data start in 1984, updated monthly.
"""
code, result = self.client.getData(vs.MACAODATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MacaoDataForeignExchangeReserves(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Macao foreign-exchange reserve data; see the API documentation for specific indicators. Historical data start in 1984, updated monthly.
"""
code, result = self.client.getData(vs.MACAODATAFOREIGNEXCHANGERESERVES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MacaoDataTourism(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Macao tourism data; see the API documentation for specific indicators. Historical data start in 1997, updated monthly.
"""
code, result = self.client.getData(vs.MACAODATATOURISM%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MacaoDataGamingIndustry(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Macao gaming-industry data; see the API documentation for specific indicators. Historical data start in 2005, updated monthly.
"""
code, result = self.client.getData(vs.MACAODATAGAMINGINDUSTRY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MacaoDataInterestRates(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Macao interest-rate data; see the API documentation for specific indicators. Historical data start in 1988, updated monthly.
"""
code, result = self.client.getData(vs.MACAODATAINTERESTRATES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MacaoDataExchangeRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Macao exchange-rate data; see the API documentation for specific indicators. Historical data start in 1984, updated monthly.
"""
code, result = self.client.getData(vs.MACAODATAEXCHANGERATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def RussiaDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Russian GDP data; see the API documentation for specific indicators. Historical data start in 1995, updated quarterly.
"""
code, result = self.client.getData(vs.RUSSIADATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def RussiaDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Russian foreign-trade data; see the API documentation for specific indicators. Historical data start in 1997, updated monthly.
"""
code, result = self.client.getData(vs.RUSSIADATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def RussiaDataCPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Russian consumer price index (CPI) data; see the API documentation for specific indicators. Historical data start in 2002, updated monthly.
"""
code, result = self.client.getData(vs.RUSSIADATACPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def RussiaDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Russian money-supply data; see the API documentation for specific indicators. Historical data start in 1996, updated monthly.
"""
code, result = self.client.getData(vs.RUSSIADATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def RussiaDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Russian business-climate index data; see the API documentation for specific indicators. Historical data start in 2009, updated monthly.
"""
code, result = self.client.getData(vs.RUSSIADATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def RussiaDataInterestRates(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Russian interest-rate data; see the API documentation for specific indicators. Historical data start in August 2000, updated daily.
"""
code, result = self.client.getData(vs.RUSSIADATAINTERESTRATES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def RussiaDataExchangeRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Russian exchange-rate data; see the API documentation for specific indicators. Historical data start in July 1992, updated daily.
"""
code, result = self.client.getData(vs.RUSSIADATAEXCHANGERATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BrazilDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Brazilian GDP data; see the API documentation for specific indicators. Historical data start in 2000, updated quarterly.
"""
code, result = self.client.getData(vs.BRAZILDATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BrazilDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Brazilian foreign-trade data; see the API documentation for specific indicators. Historical data start in 1996, updated monthly.
"""
code, result = self.client.getData(vs.BRAZILDATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BrazilDataPriceIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Brazilian price-index data; see the API documentation for specific indicators. Historical data start in 1970, updated monthly.
"""
code, result = self.client.getData(vs.BRAZILDATAPRICEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BrazilDataEmployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Brazilian employment data; see the API documentation for specific indicators. Historical data start in 1997, updated monthly.
"""
code, result = self.client.getData(vs.BRAZILDATAEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BrazilDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Brazilian money-supply data; see the API documentation for specific indicators. Historical data start in 1988, updated monthly.
"""
code, result = self.client.getData(vs.BRAZILDATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BrazilDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Brazilian business-climate index data; see the API documentation for specific indicators. Historical data start in 2007, updated monthly.
"""
code, result = self.client.getData(vs.BRAZILDATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BrazilDataRetailSale(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Brazilian retail-sales data; see the API documentation for specific indicators. Historical data start in 2004, updated monthly.
"""
code, result = self.client.getData(vs.BRAZILDATARETAILSALE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BrazilDataInterestRates(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Brazilian interest-rate data; see the API documentation for specific indicators. Historical data start in September 2011, updated daily.
"""
code, result = self.client.getData(vs.BRAZILDATAINTERESTRATES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BrazilDataExchangeRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains Brazilian exchange-rate data; see the API documentation for specific indicators. Historical data start in April 1999, updated daily.
"""
code, result = self.client.getData(vs.BRAZILDATAEXCHANGERATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SouthAfricaDataGDP(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains South African GDP data; see the API documentation for specific indicators. Historical data start in 1993, updated quarterly.
"""
code, result = self.client.getData(vs.SOUTHAFRICADATAGDP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SouthAfricaDataEmploymentUnemployment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains South African employment and unemployment data; see the API documentation for specific indicators. Historical data start in 2006, updated quarterly.
"""
code, result = self.client.getData(vs.SOUTHAFRICADATAEMPLOYMENTUNEMPLOYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SouthAfricaDataForeignTrade(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains South African foreign-trade data; see the API documentation for specific indicators. Historical data start in 2004, updated monthly.
"""
code, result = self.client.getData(vs.SOUTHAFRICADATAFOREIGNTRADE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SouthAfricaDataCPI(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains South African consumer price index (CPI) data; see the API documentation for specific indicators. Historical data start in 2008, updated monthly.
"""
code, result = self.client.getData(vs.SOUTHAFRICADATACPI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SouthAfricaDataMoneySupply(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains South African money-supply data; see the API documentation for specific indicators. Historical data start in 1965, updated monthly.
"""
code, result = self.client.getData(vs.SOUTHAFRICADATAMONEYSUPPLY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SouthAfricaDataClimateIndex(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains South African business-climate index data; see the API documentation for specific indicators. Historical data start in 1970, updated monthly.
"""
code, result = self.client.getData(vs.SOUTHAFRICADATACLIMATEINDEX%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SouthAfricaDataIndustry(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains South African industrial data; see the API documentation for specific indicators. Historical data start in 1993, updated monthly.
"""
code, result = self.client.getData(vs.SOUTHAFRICADATAINDUSTRY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SouthAfricaDataRealEstate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains South African real-estate data; see the API documentation for specific indicators. Historical data start in 1999, updated monthly.
"""
code, result = self.client.getData(vs.SOUTHAFRICADATAREALESTATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SouthAfricaDataRetailSales(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains South African retail-sales data; see the API documentation for specific indicators. Historical data start in 2000, updated monthly.
"""
code, result = self.client.getData(vs.SOUTHAFRICADATARETAILSALES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SouthAfricaDataInterestRates(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains South African interest-rate data; see the API documentation for specific indicators. Historical data start in December 1980, updated daily.
"""
code, result = self.client.getData(vs.SOUTHAFRICADATAINTERESTRATES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SouthAfricaDataExchangeRate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains South African exchange-rate data; see the API documentation for specific indicators. Historical data start in January 1970, updated daily.
"""
code, result = self.client.getData(vs.SOUTHAFRICADATAEXCHANGERATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def AgricDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the agriculture, forestry, animal husbandry and fishery sector, namely prices of major farm products, prices of major livestock and poultry products, hog inventories and the hog-to-grain price ratio, wholesale prices of major aquatic products (Weihai), closing prices of agricultural and agricultural-sideline futures, and consumption and production indices; see the API documentation for specific indicators. Historical data start in January 1947, updated daily.
"""
code, result = self.client.getData(vs.AGRICDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
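# Note (sketch): from here on the wrappers cover industry-level series rather
# than country macro data, but the calling convention is unchanged. A
# hypothetical example, assuming an instance `md`:
#
#     hogs = md.AgricDataPrice(beginDate='20140101', endDate='20141231')
#
# As elsewhere, valid indicator IDs/names and date formats come from the API
# documentation, not from this module.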
def AgricDataOutpV(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains output-value data for the agriculture, forestry, animal husbandry and fishery sector; see the API documentation for specific indicators. Historical data start in March 2000, updated quarterly.
"""
code, result = self.client.getData(vs.AGRICDATAOUTPV%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def AgricDataWASDE(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains USDA supply and demand estimates (WASDE) for major agricultural products worldwide; see the API documentation for specific indicators. Historical data start in June 1984, updated monthly.
"""
code, result = self.client.getData(vs.AGRICDATAWASDE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def AgricDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the agriculture, forestry, animal husbandry and fishery sector; see the API documentation for specific indicators. Historical data start in January 1997, updated monthly.
"""
code, result = self.client.getData(vs.AGRICDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FoodBvgDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the food and beverage sector, namely retail prices of alcoholic drinks, dairy product prices, retail prices of barrelled edible oil, wholesale meat prices and retail prices of condiments; see the API documentation for specific indicators. Historical data start in January 2004, updated daily.
"""
code, result = self.client.getData(vs.FOODBVGDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FoodBvgDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production, sales and inventory data for the food and beverage sector; see the API documentation for specific indicators. Historical data start in March 1998, updated quarterly.
"""
code, result = self.client.getData(vs.FOODBVGDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FoodBvgDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the food and beverage sector; see the API documentation for specific indicators. Historical data start in January 1998, updated monthly.
"""
code, result = self.client.getData(vs.FOODBVGDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CommTradeDataTRSCG(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains total retail sales of consumer goods (TRSCG) data covering commodity retail sales, including those of enterprises above the designated size; see the API documentation for specific indicators. Historical data start in January 1984, updated monthly.
"""
code, result = self.client.getData(vs.COMMTRADEDATATRSCG%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CommTradeDataSales50LargeEn(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains sales data for the 50 key large retail enterprises; see the API documentation for specific indicators. Historical data start in July 2011, updated monthly.
"""
code, result = self.client.getData(vs.COMMTRADEDATASALES50LARGEEN%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CommTradeDataIndexKeyCircEn(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains commercial-trade indices and sales of key circulation enterprises, such as the Yiwu small-commodity index, the consumer confidence index, the RPI, and sales of department stores, supermarkets and specialty stores; see the API documentation for specific indicators. Historical data start in December 1993, updated weekly.
"""
code, result = self.client.getData(vs.COMMTRADEDATAINDEXKEYCIRCEN%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CaterTourDataTRSCG(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains total retail sales of consumer goods (TRSCG) data covering catering revenue, including that of units above the designated size; see the API documentation for specific indicators. Historical data start in January 2010, updated monthly.
"""
code, result = self.client.getData(vs.CATERTOURDATATRSCG%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CaterTourDataHotelsOper(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains nationwide hotel operating data; see the API documentation for specific indicators. Historical data start in January 2011, updated monthly.
"""
code, result = self.client.getData(vs.CATERTOURDATAHOTELSOPER%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CaterTourDataNewHotel(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains nationwide hotel-opening statistics; see the API documentation for specific indicators. Historical data start in September 2009, updated monthly.
"""
code, result = self.client.getData(vs.CATERTOURDATANEWHOTEL%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def CaterTourDataInboundTour(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains inbound tourism arrivals and foreign-exchange receipts data; see the API documentation for specific indicators. Historical data start in January 2001, updated monthly.
"""
code, result = self.client.getData(vs.CATERTOURDATAINBOUNDTOUR%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BioMedicineDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production and sales data for the pharmaceutical and biotech sector, such as output of traditional Chinese patent medicines and chemical APIs; see the API documentation for specific indicators. Historical data start in January 2001, updated monthly.
"""
code, result = self.client.getData(vs.BIOMEDICINEDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BioMedicineDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the pharmaceutical and biotech sector, such as imports and exports of drugs, traditional Chinese medicinal materials and medical devices; see the API documentation for specific indicators. Historical data start in January 1998, updated monthly.
"""
code, result = self.client.getData(vs.BIOMEDICINEDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def PetrochemDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the petrochemical sector, such as oil prices, prices of major chemical products and the China chemical-fibre price index; see the API documentation for specific indicators. Historical data start in January 1994, updated daily.
"""
code, result = self.client.getData(vs.PETROCHEMDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def PetrochemDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production, sales and inventory data for the petrochemical sector; see the API documentation for specific indicators. Historical data start in August 1982, updated weekly.
"""
code, result = self.client.getData(vs.PETROCHEMDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def PetrochemDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the petrochemical sector; see the API documentation for specific indicators. Historical data start in January 1998, updated monthly.
"""
code, result = self.client.getData(vs.PETROCHEMDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ClothTexDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the clothing and textile sector, such as delivered cotton prices, the China cotton price index and the China yarn price index; see the API documentation for specific indicators. Historical data start in January 2006, updated daily.
"""
code, result = self.client.getData(vs.CLOTHTEXDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ClothTexDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production, sales and inventory data for the clothing and textile sector; see the API documentation for specific indicators. Historical data start in January 1990, updated monthly.
"""
code, result = self.client.getData(vs.CLOTHTEXDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ClothTexDataCottonWASDE(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains USDA supply and demand estimates (WASDE) for global cotton; see the API documentation for specific indicators. Historical data start in June 1984, updated monthly.
"""
code, result = self.client.getData(vs.CLOTHTEXDATACOTTONWASDE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ClothTexDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the clothing and textile sector; see the API documentation for specific indicators. Historical data start in January 1998, updated monthly.
"""
code, result = self.client.getData(vs.CLOTHTEXDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def LightManufDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the light-manufacturing sector, such as the FOEX price indices; see the API documentation for specific indicators. Historical data start in July 2012, updated weekly.
"""
code, result = self.client.getData(vs.LIGHTMANUFDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def LightManufDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production, sales and inventory data for the light-manufacturing sector; see the API documentation for specific indicators. Historical data start in January 2001, updated monthly.
"""
code, result = self.client.getData(vs.LIGHTMANUFDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def LightManufDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the light-manufacturing sector; see the API documentation for specific indicators. Historical data start in January 2000, updated monthly.
"""
code, result = self.client.getData(vs.LIGHTMANUFDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MiningDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the mining sector, namely prices of thermal coal, coking coal, coke and semi-coke, plus coal seaborne freight rates; see the API documentation for specific indicators. Historical data start in December 1998, updated daily.
"""
code, result = self.client.getData(vs.MININGDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MiningDataOutpSalesTransp(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production, sales and transportation data for the mining sector; see the API documentation for specific indicators. Historical data start in April 2008, updated daily.
"""
code, result = self.client.getData(vs.MININGDATAOUTPSALESTRANSP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MiningDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the mining sector; see the API documentation for specific indicators. Historical data start in January 2000, updated monthly.
"""
code, result = self.client.getData(vs.MININGDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FerMetalDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the ferrous-metal sector, such as iron ore, rebar and wire rod prices; see the API documentation for specific indicators. Historical data start in December 2008, updated daily.
"""
code, result = self.client.getData(vs.FERMETALDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FerMetalDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production, sales and inventory data for the ferrous-metal sector; see the API documentation for specific indicators. Historical data start in February 1998, updated weekly.
"""
code, result = self.client.getData(vs.FERMETALDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def FerMetalDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the ferrous-metal sector; see the API documentation for specific indicators. Historical data start in January 1998, updated monthly.
"""
code, result = self.client.getData(vs.FERMETALDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def NonferMetalDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the non-ferrous metals industry, such as aluminium, copper, lead, zinc, tin, nickel, gold, and silver prices; see the API documentation for specific indicators. Historical data begins in January 1968 and is updated daily.
"""
code, result = self.client.getData(vs.NONFERMETALDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def NonferMetalDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production, sales, and inventory data for the non-ferrous metals industry; see the API documentation for specific indicators. Historical data begins in March 1973 and is updated weekly.
"""
code, result = self.client.getData(vs.NONFERMETALDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def NonferMetalDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the non-ferrous metals industry; see the API documentation for specific indicators. Historical data begins in January 1995 and is updated monthly.
"""
code, result = self.client.getData(vs.NONFERMETALDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def DeliveryEqDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the transportation equipment industry, such as the GAIN market index and used-car transaction prices; see the API documentation for specific indicators. Historical data begins in February 2011 and is updated monthly.
"""
code, result = self.client.getData(vs.DELIVERYEQDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def DeliveryEqDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production and sales data for the transportation equipment industry, such as output of automobiles, ships, and aircraft; see the API documentation for specific indicators. Historical data begins in March 1958 and is updated monthly.
"""
code, result = self.client.getData(vs.DELIVERYEQDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def DeliveryEqDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the transportation equipment industry, such as automobiles, ships, and aircraft; see the API documentation for specific indicators. Historical data begins in February 1998 and is updated monthly.
"""
code, result = self.client.getData(vs.DELIVERYEQDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TrafficTransDataRailway(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains railway transport data within the transportation industry, such as railway passenger and freight turnover; see the API documentation for specific indicators. Historical data begins in December 1983 and is updated monthly.
"""
code, result = self.client.getData(vs.TRAFFICTRANSDATARAILWAY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TrafficTransDataRoad(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains road transport data within the transportation industry, such as road passenger and freight turnover; see the API documentation for specific indicators. Historical data begins in February 1989 and is updated monthly.
"""
code, result = self.client.getData(vs.TRAFFICTRANSDATAROAD%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TrafficTransDataWaterway(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains waterway transport data within the transportation industry, such as the Baltic shipping indices; see the API documentation for specific indicators. Historical data begins in June 2007 and is updated daily.
"""
code, result = self.client.getData(vs.TRAFFICTRANSDATAWATERWAY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def TrafficTransDataAir(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains air transport data within the transportation industry, such as air transport turnover and flight efficiency; see the API documentation for specific indicators. Historical data begins in August 1983 and is updated monthly.
"""
code, result = self.client.getData(vs.TRAFFICTRANSDATAAIR%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UtilIndustryDataPower(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains electric power data within the utilities industry, such as national power generation and industrial electricity consumption; see the API documentation for specific indicators. Historical data begins in March 1989 and is updated monthly.
"""
code, result = self.client.getData(vs.UTILINDUSTRYDATAPOWER%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UtilIndustryDataWater(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains water supply data within the utilities industry, such as sector revenue and gross margin; see the API documentation for specific indicators. Historical data begins in February 2003 and is updated monthly.
"""
code, result = self.client.getData(vs.UTILINDUSTRYDATAWATER%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UtilIndustryDataGas(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains gas supply data within the utilities industry, such as sector revenue and gross margin; see the API documentation for specific indicators. Historical data begins in March 2003 and is updated monthly.
"""
code, result = self.client.getData(vs.UTILINDUSTRYDATAGAS%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def UtilIndustryDataEnvirProt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains environmental protection data within the utilities industry, such as sector fixed-asset investment and AQI readings for key cities; see the API documentation for specific indicators. Historical data begins in March 2003 and is updated daily.
"""
code, result = self.client.getData(vs.UTILINDUSTRYDATAENVIRPROT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ElecCompDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the electronic components industry, such as panel prices and electronic device price indices; see the API documentation for specific indicators. Historical data begins in February 2007 and is updated daily.
"""
code, result = self.client.getData(vs.ELECCOMPDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ElecCompDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production and sales data for the electronic components industry as well as the semiconductor book-to-bill (BB) ratio; see the API documentation for specific indicators. Historical data begins in February 1998 and is updated monthly.
"""
code, result = self.client.getData(vs.ELECCOMPDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def ElecCompDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the electronic components industry; see the API documentation for specific indicators. Historical data begins in January 1998 and is updated monthly.
"""
code, result = self.client.getData(vs.ELECCOMPDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def InfoEqptDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the information equipment industry, namely the China IT market index and average prices of mainstream notebook and tablet brands; see the API documentation for specific indicators. Historical data begins in June 2007 and is updated weekly.
"""
code, result = self.client.getData(vs.INFOEQPTDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def InfoEqptDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production, sales, inventory, and shipment data for the information equipment industry; see the API documentation for specific indicators. Historical data begins in February 1998 and is updated monthly.
"""
code, result = self.client.getData(vs.INFOEQPTDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def InfoEqptDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the information equipment industry; see the API documentation for specific indicators. Historical data begins in January 1998 and is updated monthly.
"""
code, result = self.client.getData(vs.INFOEQPTDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HouseholdAplsDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production and sales data for the household appliances industry; see the API documentation for specific indicators. Historical data begins in January 1998 and is updated monthly.
"""
code, result = self.client.getData(vs.HOUSEHOLDAPLSDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def HouseholdAplsDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the household appliances industry; see the API documentation for specific indicators. Historical data begins in January 1998 and is updated monthly.
"""
code, result = self.client.getData(vs.HOUSEHOLDAPLSDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def InfoServDataSoftware(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains software industry data within information services, namely industry revenue, industry fixed investment, software user coverage, total software launches, and total usage time; see the API documentation for specific indicators. Historical data begins in August 2008 and is updated weekly.
"""
code, result = self.client.getData(vs.INFOSERVDATASOFTWARE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def InfoServDataComm(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains communications industry data within information services, namely telecom operations and postal operations; see the API documentation for specific indicators. Historical data begins in December 1997 and is updated monthly.
"""
code, result = self.client.getData(vs.INFOSERVDATACOMM%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def InfoServDataInternet(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains internet industry data within information services, namely market size and visit counts; see the API documentation for specific indicators. Historical data begins in March 2006 and is updated weekly.
"""
code, result = self.client.getData(vs.INFOSERVDATAINTERNET%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def RealEstDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the real estate industry, such as Chinese new-home prices, Chinese second-hand home prices, and global housing prices; see the API documentation for specific indicators. Historical data begins in November 1994 and is updated monthly.
"""
code, result = self.client.getData(vs.REALESTDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def RealEstDataInvestDvpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains real estate investment and development data and sources of investment funding; see the API documentation for specific indicators. Historical data begins in February 1994 and is updated monthly.
"""
code, result = self.client.getData(vs.REALESTDATAINVESTDVPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def RealEstDataLand(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains land market data for the real estate industry, such as land transactions and land supply; see the API documentation for specific indicators. Historical data begins in March 2013 and is updated monthly.
"""
code, result = self.client.getData(vs.REALESTDATALAND%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def RealEstDataSales(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains sales and inventory data for the real estate industry; see the API documentation for specific indicators. Historical data begins in February 1991 and is updated weekly.
"""
code, result = self.client.getData(vs.REALESTDATASALES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BldgMaterDataPrice(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains price data for the construction and building materials industry, such as cement and glass prices; see the API documentation for specific indicators. Historical data begins in December 2009 and is updated daily.
"""
code, result = self.client.getData(vs.BLDGMATERDATAPRICE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BldgMaterDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production, sales, and inventory data for the construction and building materials industry; see the API documentation for specific indicators. Historical data begins in January 1990 and is updated quarterly.
"""
code, result = self.client.getData(vs.BLDGMATERDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MchnrEqptDataSalesOutput(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains production and sales data for the machinery and equipment industry, such as output of instruments and meters, special-purpose equipment, and general-purpose equipment, plus construction machinery production and sales; see the API documentation for specific indicators. Historical data begins in January 1990 and is updated monthly.
"""
code, result = self.client.getData(vs.MCHNREQPTDATASALESOUTPUT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def MchnrEqptDataImptExpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains import and export data for the machinery and equipment industry; see the API documentation for specific indicators. Historical data begins in January 2010 and is updated monthly.
"""
code, result = self.client.getData(vs.MCHNREQPTDATAIMPTEXPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BankDataAssetsLiabilities(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains bank balance sheet data, such as total assets and total liabilities of commercial banks; see the API documentation for specific indicators. Historical data begins in January 2011 and is updated monthly.
"""
code, result = self.client.getData(vs.BANKDATAASSETSLIABILITIES%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def BankDataNonPerformingLoans(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains non-performing loan data for commercial banks, such as NPL balances and NPL ratios by bank category; see the API documentation for specific indicators. Historical data begins in June 2003 and is updated quarterly.
"""
code, result = self.client.getData(vs.BANKDATANONPERFORMINGLOANS%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def SecuritiesDataOperIndic(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains operating indicators for the securities industry, such as total assets, net capital, revenue, and profit; see the API documentation for specific indicators. Historical data begins in December 2008 and is updated semi-annually.
"""
code, result = self.client.getData(vs.SECURITIESDATAOPERINDIC%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def InsDataPremPryInsurance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains primary insurance premium income data, such as premium income by line of insurance; see the API documentation for specific indicators. Historical data begins in January 1999 and is updated monthly.
"""
code, result = self.client.getData(vs.INSDATAPREMPRYINSURANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def InsDataClaimPayment(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains insurance claim and benefit payment data, such as claims and benefits paid by line of insurance; see the API documentation for specific indicators. Historical data begins in January 1999 and is updated monthly.
"""
code, result = self.client.getData(vs.INSDATACLAIMPAYMENT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def InsDataFundBalance(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains data on the balance of insurance funds in use, such as bank deposits and investments in stocks and bond funds; see the API documentation for specific indicators. Historical data begins in January 1999 and is updated monthly.
"""
code, result = self.client.getData(vs.INSDATAFUNDBALANCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def InsDataAssets(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
Contains total asset and net asset data for the insurance industry, broken down by line of insurance; see the API documentation for specific indicators. Historical data begins in March 1999 and is updated monthly.
"""
code, result = self.client.getData(vs.INSDATAASSETS%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataYili(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Yili at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAYILI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataGuangming(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Bright Dairy (Guangming) at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAGUANGMING%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataChengDeLolo(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Chengde Lolo at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATACHENGDELOLO%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataQiaqia(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Qiaqia Food at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAQIAQIA%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataVVGroup(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for VV Group at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAVVGROUP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataJinfengWine(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Jinfeng Wine at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAJINFENGWINE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataGuyueLongshan(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Guyue Longshan at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAGUYUELONGSHAN%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataShanxiFenjiu(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Shanxi Fenjiu at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATASHANXIFENJIU%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataZhangyuA(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Changyu A at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAZHANGYUA%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataMogao(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Mogao at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAMOGAO%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataKemenNoodleMFG(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Kemen Noodle Manufacturing at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAKEMENNOODLEMFG%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataJinziHam(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Jinzi Ham at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAJINZIHAM%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataLotus(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Lotus (Lianhua MSG) at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATALOTUS%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataBeiyinMate(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Beingmate at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATABEIYINMATE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataQingdaoHaier(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Qingdao Haier at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAQINGDAOHAIER%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataTCLGroup(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for TCL Group at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATATCLGROUP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataMideaGroup(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Midea Group at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAMIDEAGROUP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataWhirlpool(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Whirlpool at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAWHIRLPOOL%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataJoyoung(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Joyoung at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAJOYOUNG%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataVatti(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Vatti at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAVATTI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataSupor(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Supor at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATASUPOR%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataKonka(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Konka (Shenzhen Konka A) at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAKONKA%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataChanghong(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Sichuan Changhong at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATACHANGHONG%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataLittleSwan(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Little Swan A at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in October 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATALITTLESWAN%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataMeiling(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Meiling Electric at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAMEILING%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataZTE(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for ZTE at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAZTE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataDatangTelecom(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Datang Telecom at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATADATANGTELECOM%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataBird(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Bird at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATABIRD%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataDahuaTechnology(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Dahua Technology at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATADAHUATECHNOLOGY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataTsinghuaTongfang(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Tsinghua Tongfang at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATATSINGHUATONGFANG%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataHedy(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Hedy Holding at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in October 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAHEDY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataHaday(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Haday (Haitian Flavouring) at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAHADAY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataYanjingBeer(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Yanjing Beer at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAYANJINGBEER%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataMaiquer(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Maiquer at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in October 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAMAIQUER%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataCiticGuoanWine(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for CITIC Guoan Wine at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATACITICGUOANWINE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataQingqingBarleyWine(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Qingqing Barley Wine at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in September 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAQINGQINGBARLEYWINE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataHaoxiangni(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Haoxiangni at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAHAOXIANGNI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataFulingZhacai(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Fuling Zhacai at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAFULINGZHACAI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataHuangshanghuang(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Huangshanghuang at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAHUANGSHANGHUANG%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataHainanYedao(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Hainan Yedao at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAHAINANYEDAO%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataShuangtaFood(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Shuangta Food at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATASHUANGTAFOOD%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataJiuguiLiquor(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Jiugui Liquor at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAJIUGUILIQUOR%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataBlackSesame(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Black Sesame at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATABLACKSESAME%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataKingsLuck(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for King's Luck at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAKINGSLUCK%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataLaobaiganLiquor(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Laobaigan Liquor at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATALAOBAIGANLIQUOR%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataShuanghuiDvpt(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data for Shuanghui Development at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATASHUANGHUIDVPT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataWuliangye(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAWULIANGYE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataGree(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in November 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAGREE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataHisenseElectric(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAHISENSEELECTRIC%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataHisense(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAHISENSE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataJiajiaFood(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in September 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAJIAJIAFOOD%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataRobam(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAROBAM%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataASD(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAASD%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataMacro(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAMACRO%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataElecpro(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAELECPRO%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataSanglejin(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in October 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATASANGLEJIN%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataHoma(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAHOMA%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataLongdaMeat(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATALONGDAMEAT%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataByHealth(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATABYHEALTH%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataHaixin(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAHAIXIN%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataVanward(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAVANWARD%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataMeida(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAMEIDA%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataHengshunVinegarindustry(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in September 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAHENGSHUNVINEGARINDUSTRY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataShuijingfang(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in November 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATASHUIJINGFANG%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataChunlan(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in November 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATACHUNLAN%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataYilite(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAYILITE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataHuangshi(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAHUANGSHI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataYanghe(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAYANGHE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataSanyuan(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATASANYUAN%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataTuopaiShede(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATATUOPAISHEDE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataKuaijishan(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAKUAIJISHAN%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataTonghua(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATATONGHUA%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataKweichowMoutaiGroup(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAKWEICHOWMOUTAIGROUP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataTsingTao(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATATSINGTAO%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataGujing(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in November 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAGUJING%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataLuzhouLaojiao(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATALUZHOULAOJIAO%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataShanghaiMaling(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in August 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATASHANGHAIMALING%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataBlackCattleFood(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATABLACKCATTLEFOOD%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataDelisi(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATADELISI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataStarLakeBioscience(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATASTARLAKEBIOSCIENCE%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataJonjeeHiTech(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAJONJEEHITECH%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataCRSanjiu(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in November 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATACRSANJIU%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataJiuzhitang(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in November 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAJIUZHITANG%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataFuanna(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAFUANNA%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataLuolai(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATALUOLAI%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataGuirenniao(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAGUIRENNIAO%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataBaoxiniao(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATABAOXINIAO%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataLaofengxiang(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATALAOFENGXIANG%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataFiytaA(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAFIYTAA%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataGoldleafJewelry(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAGOLDLEAFJEWELRY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataComixGroup(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATACOMIXGROUP%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataYaojiPlayingCard(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAYAOJIPLAYINGCARD%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataMGStationery(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAMGSTATIONERY%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataCS(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATACS%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataEdifier(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAEDIFIER%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataHikVision(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAHIKVISION%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataSolareast(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATASOLAREAST%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataChigo(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in December 2014, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATACHIGO%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def EcommerceDataAucma(self, indicID='', indicName='', beginDate='', endDate='', field=''):
"""
May include e-commerce (Taobao and Tmall) data at the company, business, and product level; see the API documentation for specific indicators. Historical data begins in January 2015, at daily frequency, updated monthly.
"""
code, result = self.client.getData(vs.ECOMMERCEDATAAUCMA%(indicID, indicName, beginDate, endDate, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
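# A minimal usage sketch for the wrappers above. The class name `api` is bound to and the
# indicator arguments shown here are illustrative assumptions, not part of this module; the
# enclosing class is instantiated elsewhere with an authenticated `client`:
#
#     api = SomeDataAPI()  # hypothetical instance of the enclosing wrapper class
#     df = api.EcommerceDataYili(beginDate='20150101', endDate='20150301')
#     if df is not None:
#         print(df.head())
#
# Each method formats its query into the corresponding URL template in `vs` and returns a
# pandas DataFrame via `_ret_data`, or None if the request fails.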
| bsd-3-clause |
Justin-Tan/hep-analysis | classifiers/xgb/hyperband/methods/xgb.py | 1 | 4884 | #!/usr/bin/env python
# xgb methods required for HyperBand algorithm
import pandas as pd
import xgboost as xgb
import sys, time, os
from hyperopt import hp
import hyperopt.pyll.stochastic
from hyperband import Hyperband
from pprint import pprint
def xgb_hyp():
gbtree_hyp = {
'booster': 'gbtree',
'eta': hp.uniform('lr', 0.01, 0.15),
'gamma': hp.uniform('mlr', 0.05, 2.5),
'min_child_weight': hp.uniform('mcw', 0, 2),
'max_depth': hp.quniform('md', 3, 9, 1),
'subsample': hp.uniform('ss', 0.7, 1),
'colsample_bytree': hp.uniform('cs', 0.7, 1),
'objective': 'binary:logistic',
'silent': 1
}
dart_hyp = {
'booster': 'dart',
'sample_type': hp.choice('dart_st', ['uniform', 'weighted']),
'normalize_type': hp.choice('dart_nt', ['tree', 'forest']),
'rate_drop': hp.uniform('dropout', 0, 0.3),
'skip_drop': hp.uniform('skip', 0, 0.25)
}
return gbtree_hyp, dart_hyp
def load_data(fname, test_size = 0.05):
from sklearn.model_selection import train_test_split
df = pd.read_hdf(fname, 'df')
# Split data into training, testing sets
df_X_train, df_X_test, df_y_train, df_y_test = train_test_split(df.drop(['labels', 'mbc', 'deltae'], axis = 1),
df['labels'], test_size = test_size, random_state=42)
dTrain = xgb.DMatrix(data = df_X_train.values, label = df_y_train.values, feature_names = df_X_train.columns)
dTest = xgb.DMatrix(data = df_X_test.values, label = df_y_test.values, feature_names = df_X_test.columns)
print('# Features: {} | # Train Samples: {} | # Test Samples: {}'.format(dTrain.num_col(),
dTrain.num_row(), dTest.num_row()))
# Save to XGBoost binary file for faster loading
dTrain.save_binary("dTrain.buffer")
dTest.save_binary("dTest.buffer")
return dTrain, dTest
def load_data_exclusive(fname, mode, channel, test_size = 0.05):
from sklearn.model_selection import train_test_split
# Split data into training, testing sets
df = pd.read_hdf(fname, 'df')
df_X_train, df_X_test, df_y_train, df_y_test = train_test_split(df[df.columns[:-1]], df['labels'],
test_size = test_size, random_state = 24601)
dTrain = xgb.DMatrix(data = df_X_train.values, label = df_y_train.values, feature_names = df.columns[:-1])
dTest = xgb.DMatrix(data = df_X_test.values, label = df_y_test.values, feature_names = df.columns[:-1])
# Save to XGBoost binary file for faster loading
dTrain.save_binary("dTrain" + mode + channel + ".buffer")
dTest.save_binary("dTest" + mode + channel + ".buffer")
return dTrain, dTest
def train_hyp_config(data, hyp_params, num_boost_rounds):
# Returns validation metric after training configuration for allocated resources
# Inputs: data - DMatrix tuple: (train, test)
# Add evaluation metrics for validation set
hyp_params['eval_metric'] = '[email protected]'
pList = list(hyp_params.items())+[('eval_metric', 'auc')]
# Number of boosted trees to construct
nTrees = num_boost_rounds
# Specify validation set to watch performance
dTrain, dTest = data[0], data[1]
evalList = [(dTrain,'train'), (dTest,'eval')]
print("Starting model training\n")
start_time = time.time()
# Train the model using the above parameters
bst = xgb.train(params = pList, dtrain = dTrain, evals = evalList, num_boost_round = nTrees,
early_stopping_rounds = 256, verbose_eval = int(min(64, num_boost_rounds/2)))
delta_t = time.time() - start_time
print("Training ended. Elapsed time: (%.3f s)." %(delta_t))
pprint(bst.attributes())
evalDict = {'auc': float(bst.attr('best_score')), '[email protected]': bst.attr('best_msg').split('\t')[-2],
'best_iteration': int(bst.attr('best_iteration'))}
return evalDict
def get_hyp_config():
# Returns a set of i.i.d samples from a distribution over the hyperparameter space
gbtree_hyp, dart_hyp = xgb_hyp()
space = hp.choice('booster', [gbtree_hyp, {**gbtree_hyp, **dart_hyp}])
params = hyperopt.pyll.stochastic.sample(space)
for k, v in params.items():
if type(v) == float and int(v) == v:
params[k] = int(v)
params = {k: v for k, v in params.items() if v != 'default'}  # compare by value; 'is not' on a str literal checks identity
return params
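# For reference, a sampled configuration might look like the following; the exact values are
# illustrative only, drawn from the distributions defined in xgb_hyp():
# {'booster': 'gbtree', 'eta': 0.08, 'gamma': 1.3, 'min_child_weight': 0.9, 'max_depth': 6,
#  'subsample': 0.85, 'colsample_bytree': 0.9, 'objective': 'binary:logistic', 'silent': 1}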
def run_hyp_config(data, hyp_params, n_iterations, rounds_per_iteration = 64):
"""
Input: Training data, Hyperparameter configuration (t); resource allocation (r)
Returns: Validation metric after training configuration for allocated resources
"""
num_boost_rounds = int(round(n_iterations*rounds_per_iteration))
print("Boosting iterations: %d"%(num_boost_rounds))
pprint(hyp_params)
return train_hyp_config(data, hyp_params, num_boost_rounds)
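# Sketch of how these helpers could be wired into the Hyperband driver imported above.
# The HDF5 path and the Hyperband constructor signature are assumptions, not part of this module:
#
#     if __name__ == '__main__':
#         data = load_data('features.h5')
#         hb = Hyperband(get_hyp_config, lambda cfg, r: run_hyp_config(data, cfg, r))
#         results = hb.run()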
| gpl-3.0 |
anirudhjayaraman/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
wlamond/scikit-learn | sklearn/linear_model/tests/test_base.py | 83 | 15089 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import _preprocess_data
from sklearn.linear_model.base import sparse_center_data, center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
rng = np.random.RandomState(0)
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [1])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [0])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [0])
def test_linear_regression_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
# It would not work with under-determined systems
for n_samples, n_features in ((6, 5), ):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for intercept in (True, False):
# LinearRegression with explicit sample_weight
reg = LinearRegression(fit_intercept=intercept)
reg.fit(X, y, sample_weight=sample_weight)
coefs1 = reg.coef_
inter1 = reg.intercept_
assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks
assert_greater(reg.score(X, y), 0.5)
# Closed form of the weighted least square
# theta = (X^T W X)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs1, coefs2)
else:
assert_array_almost_equal(coefs1, coefs2[1:])
assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
reg = LinearRegression()
# make sure the "OK" sample weights actually work
reg.fit(X, y, sample_weights_OK)
reg.fit(X, y, sample_weights_OK_1)
reg.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
# Test that linear regression also works with sparse data
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
reg = LinearRegression(fit_intercept=True)
reg.fit((X), Y)
assert_equal(reg.coef_.shape, (2, n_features))
Y_pred = reg.predict(X)
reg.fit(X, y)
y_pred = reg.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions with sparse data
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_preprocess_data():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
expected_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_preprocess_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [X, sparse.csc_matrix(X)]
for X in args:
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)


def test_preprocess_data_weighted():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
    # Currently the scale is not weighted, but it is computed around the
    # weighted mean.
expected_X_norm = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)


def test_sparse_preprocess_data_with_return_mean():
n_samples = 200
n_features = 2
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
expected_X_norm = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
return_mean=True)
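    # With return_mean=True, sparse X is left uncentered (Xt equals XA) and
    # the column means are returned separately; y is still centered.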
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt.A, XA / expected_X_norm)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))


def test_csr_preprocess_data():
# Test output format of _preprocess_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = _preprocess_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')


def test_rescale_data():
n_samples = 200
n_features = 2
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
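    # _rescale_data is expected to multiply the rows of X and y by
    # sqrt(sample_weight), which turns weighted least squares into an
    # ordinary least-squares problem on the rescaled data.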
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)


@ignore_warnings  # all deprecation warnings
def test_deprecation_center_data():
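    # center_data and sparse_center_data are deprecated; for every combination
    # of options they should return the same results as _preprocess_data.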
n_samples = 200
n_features = 2
w = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
param_grid = product([True, False], [True, False], [True, False],
[None, w])
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
XX = X.copy() # such that we can try copy=False as well
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
XX = X.copy()
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
assert_array_almost_equal(X1, X2)
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
# Sparse cases
X = sparse.csr_matrix(X)
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(X, y, fit_intercept=fit_intercept, normalize=normalize,
copy=copy, sample_weight=sample_weight)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight, return_mean=False)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
for (fit_intercept, normalize) in product([True, False], [True, False]):
X1, y1, X1_mean, X1_var, y1_mean = \
sparse_center_data(X, y, fit_intercept=fit_intercept,
normalize=normalize)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, return_mean=True)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
| bsd-3-clause |