repo_name | path | copies | size | content | license
---|---|---|---|---|---|
robin-lai/scikit-learn | doc/sphinxext/gen_rst.py | 106 | 40198 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
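# Hedged usage sketch (editorial addition, not part of the original module):
# Tee duplicates every write to two file-like objects, which is how
# generate_file_rst() below captures an example's stdout while still echoing
# it to the console. The helper name _demo_tee is hypothetical and is never
# called by the build.
def _demo_tee():
    buffer = StringIO()
    tee = Tee(sys.stdout, buffer)
    tee.write('hello from the example\n')
    tee.flush()
    return buffer.getvalue()  # the same text was also written to sys.stdout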
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
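# Note (editorial addition, not in the original file): joblib.Memory memoizes
# _get_data on disk under ``_build``, so repeated documentation builds reuse
# previously downloaded search indexes and HTML pages instead of fetching
# them again over the network.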
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
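# Hedged illustration (editorial addition, not part of the original module):
# parse_sphinx_searchindex understands the ``objects:{...}`` /
# ``filenames:[...]`` layout emitted by Sphinx. The miniature index below is
# synthetic and the helper is hypothetical; it is never called by the build.
def _demo_parse_searchindex():
    tiny_index = ('Search.setIndex({objects:{"a.b":{"Klass":[0,0,1,"#a.b.Klass"]}},'
                  'filenames:["x","y"]})')
    filenames, objects = parse_sphinx_searchindex(tiny_index)
    # filenames == ['x', 'y']
    # objects == {'a.b': {'Klass': [0, 0, 1, '"#a.b.Klass"']}}
    return filenames, objects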
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so that the link works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plots in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
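# Hedged usage sketch (editorial addition, not part of the original module):
# extract_docstring returns the module docstring, the first paragraph (used
# for the gallery tooltips) and the row at which the example code proper
# starts. The helper below is hypothetical and only illustrates the return
# values on a tiny generated file.
def _demo_extract_docstring():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
        f.write('"""My example\n\nLonger description.\n"""\nprint(1)\n')
    docstring, first_par, end_row = extract_docstring(f.name)
    # docstring == 'My example\n\nLonger description.\n'
    # first_par == 'My example'; end_row is the first line of actual code
    return docstring, first_par, end_row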
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet, is_backref=False):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. only:: html\n\n')
out.append(' .. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
if is_backref:
out.append('.. only:: not html\n\n * :ref:`example_%s`' % ref_name)
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet, is_backref=True))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
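# Hedged illustration (editorial addition, not in the original file): the
# helper walks from the full dotted path towards the package root and keeps
# the shortest prefix for which ``from <prefix> import <obj>`` still works.
# For instance, get_short_module_name('sklearn.ensemble.gradient_boosting',
# 'GradientBoostingClassifier') is expected to return 'sklearn.ensemble',
# because the class is importable from the subpackage but not from the
# top-level ``sklearn`` namespace in this version.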
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example; we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions:
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s failed to execute:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
# Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`,
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all, as some contained whitespace due to their default gallery
# thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
# have it happen with the rest. Ideally these should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
tosolveit/scikit-learn | sklearn/ensemble/gradient_boosting.py | 50 | 67625 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._splitter import PresortBestSplitter
from ..tree._criterion import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
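# Hedged usage sketch (editorial addition, not part of the original module):
# the ``init`` estimators in this block only look at ``y`` (``X`` is ignored)
# and predict a constant column that seeds the boosting stages. The helper
# name is hypothetical and the function is never called by the module.
def _demo_quantile_estimator():
    import numpy as np
    y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    est = QuantileEstimator(alpha=0.5)
    est.fit(None, y)                      # X is unused by the init estimators
    return est.predict(np.zeros((3, 1)))  # (3, 1) array filled with the median, 3.0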
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,)
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
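# Hedged worked example (editorial addition, not in the original file): for
# least squares the loss is the (weighted) mean squared error and the
# negative gradient is simply the residual ``y - pred``. The helper is
# hypothetical and never called by the module.
def _demo_least_squares_loss():
    import numpy as np
    y = np.array([1.0, 2.0, 3.0])
    pred = np.array([[1.5], [2.0], [2.0]])
    loss = LeastSquaresError(n_classes=1)
    value = loss(y, pred)                   # (0.25 + 0.0 + 1.0) / 3
    grad = loss.negative_gradient(y, pred)  # array([-0.5, 0., 1.])
    return value, grad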
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
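# Hedged note (editorial addition, not in the original file): the Huber
# negative gradient keeps the raw residual where |y - pred| <= gamma and
# clips it to gamma * sign(y - pred) otherwise, with gamma recomputed at each
# call as the ``alpha`` percentile of |y - pred|; this clipping is what makes
# the loss robust to outliers.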
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression makes it possible to estimate the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
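# Hedged worked example (editorial addition, not in the original file): the
# quantile loss uses a piecewise-constant negative gradient, ``alpha`` where
# y > pred and ``-(1 - alpha)`` elsewhere. The helper is hypothetical and
# never called by the module.
def _demo_quantile_gradient():
    import numpy as np
    y = np.array([1.0, 2.0, 3.0])
    pred = np.array([[2.0], [2.0], [2.0]])
    loss = QuantileLossFunction(n_classes=1, alpha=0.9)
    return loss.negative_gradient(y, pred)  # array([-0.1, -0.1, 0.9])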
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
Loss functions that do not support probabilities raise a TypeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage of the fact that y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
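# Hedged worked example (editorial addition, not in the original file):
# BinomialDeviance scores are raw log-odds; the deviance is twice the
# negative log-likelihood and probabilities come from the logistic sigmoid.
# The helper is hypothetical and never called by the module.
def _demo_binomial_deviance():
    import numpy as np
    y = np.array([0.0, 1.0, 1.0])
    score = np.zeros((3, 1))             # raw score 0 -> probability 0.5
    loss = BinomialDeviance(n_classes=2)
    value = loss(y, score)               # -2 * mean(0 - log(2)) == 2 * log(2)
    proba = loss._score_to_proba(score)  # every row is [0.5, 0.5]
    return value, proba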
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
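# Hedged note (editorial addition, not in the original file): with labels
# recoded as y_ = 2*y - 1 in {-1, +1}, the loss above is exp(-y_ * pred),
# i.e. the AdaBoost criterion, and _score_to_decision simply takes the sign
# of the raw score.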
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1``, output is printed once in a while (when iteration mod
verbose_mod is zero); if larger than 1, output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, call `fit`"
                                 " before making predictions.")
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
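    # Illustrative sketch (not part of the original source): a ``monitor``
    # callable that stops training once the ten most recent out-of-bag
    # improvements are all non-positive; the names below are hypothetical.
    #
    #     def early_stopping_monitor(i, est, locals_):
    #         if i < 9 or not hasattr(est, 'oob_improvement_'):
    #             return False
    #         return bool(np.all(est.oob_improvement_[i - 9:i + 1] <= 0))
    #
    #     est = GradientBoostingClassifier(subsample=0.5, n_estimators=500)
    #     est.fit(X_train, y_train, monitor=early_stopping_monitor)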
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
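    # Descriptive note (not original source): ``train_score_[i]`` holds the
    # in-bag loss after stage ``i``; with subsampling, ``oob_improvement_[i]``
    # is loss_oob(before stage i) - loss_oob(after stage i), so positive
    # values mean the stage reduced the out-of-bag loss.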
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
            return the index of the leaf x ends up in for each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
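# Illustrative sketch (not part of the original module): the leaf indices
# returned by ``apply`` are commonly one-hot encoded to build tree-based
# feature transforms; ``X_train``/``y_train`` below are assumed to exist.
#
#     from sklearn.preprocessing import OneHotEncoder
#     gbm = GradientBoostingClassifier(n_estimators=50).fit(X_train, y_train)
#     leaves = gbm.apply(X_train)[:, :, 0]    # shape [n_samples, n_estimators]
#     leaf_features = OneHotEncoder().fit_transform(leaves)  # sparse one-hot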
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
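    Examples
    --------
    A minimal, illustrative round trip on made-up toy data (not part of the
    original documentation; only the output shape is shown to keep the
    doctest deterministic):
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> X = [[0., 0.], [1., 1.], [0., 1.], [1., 0.]]
    >>> y = [0, 1, 1, 0]
    >>> clf = GradientBoostingClassifier(n_estimators=10, random_state=0)
    >>> clf.fit(X, y).predict([[0.5, 0.5]]).shape
    (1,)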
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
        This method allows monitoring (i.e. determining error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
        This method allows monitoring (i.e. determining error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
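    Examples
    --------
    A minimal, illustrative sketch on made-up toy data (not part of the
    original documentation; only the output shape is shown):
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X = [[0.], [1.], [2.], [3.]]
    >>> y = [0., 1., 2., 3.]
    >>> est = GradientBoostingRegressor(n_estimators=10, random_state=0)
    >>> est.fit(X, y).predict([[1.5]]).shape
    (1,)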
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
        This method allows monitoring (i.e. determining error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the ensemble,
            return the index of the leaf x ends up in for each estimator.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
| bsd-3-clause |
ankurankan/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space; unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
well that of representing a flat map of the Earth, as with a
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_.
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
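# Descriptive note (not in the original example): p and t are the azimuthal
# and polar angles of the usual spherical-to-Cartesian map
#   x = sin(t) * cos(p), y = sin(t) * sin(p), z = cos(t);
# restricting t to (pi/8, pi - pi/8) severs the poles, while drawing p from
# [0, 2*pi - 0.55) leaves a thin wedge of the sphere unsampled (the slice).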
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
shiguol/tushare | tushare/stock/shibor.py | 38 | 5010 | # -*- coding:utf-8 -*-
"""
Shanghai Interbank Offered Rate (Shibor) data interface
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
from tushare.util import dateu as du
def shibor_data(year=None):
"""
    Fetch the Shanghai Interbank Offered Rate (Shibor)
    Parameters
    ------
      year: year (int)
    Return
    ------
    date: date
    ON: overnight rate
    1W: 1-week rate
    2W: 2-week rate
    1M: 1-month rate
    3M: 3-month rate
    6M: 6-month rate
    9M: 9-month rate
    1Y: 1-year rate
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Shibor']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Shibor',
year, lab,
year))
df.columns = ct.SHIBOR_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def shibor_quote_data(year=None):
"""
    Fetch Shibor quotes from the contributing banks
    Parameters
    ------
      year: year (int)
    Return
    ------
    date: date
    bank: name of the quoting bank
    ON: overnight rate
    ON_B: overnight bid
    ON_A: overnight ask
    1W_B: 1-week bid
    1W_A: 1-week ask
    2W_B: 2-week bid
    2W_A: 2-week ask
    1M_B: 1-month bid
    1M_A: 1-month ask
    3M_B: 3-month bid
    3M_A: 3-month ask
    6M_B: 6-month bid
    6M_A: 6-month ask
    9M_B: 9-month bid
    9M_A: 9-month ask
    1Y_B: 1-year bid
    1Y_A: 1-year ask
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Quote']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Quote',
year, lab,
year), skiprows=[0])
df.columns = ct.QUOTE_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def shibor_ma_data(year=None):
"""
    Fetch Shibor moving-average data
    Parameters
    ------
      year: year (int)
    Return
    ------
    date: date
    the remaining columns are the 5-, 10- and 20-day means for each tenor
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Tendency']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Shibor_Tendency',
year, lab,
year), skiprows=[0])
df.columns = ct.SHIBOR_MA_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def lpr_data(year=None):
"""
    Fetch the Loan Prime Rate (LPR)
    Parameters
    ------
      year: year (int)
    Return
    ------
    date: date
    1Y: 1-year loan prime rate
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['LPR']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'LPR',
year, lab,
year))
df.columns = ct.LPR_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def lpr_ma_data(year=None):
"""
    Fetch Loan Prime Rate (LPR) moving-average data
    Parameters
    ------
      year: year (int)
    Return
    ------
    date: date
    1Y_5: 5-day mean
    1Y_10: 10-day mean
    1Y_20: 20-day mean
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['LPR_Tendency']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'LPR_Tendency',
year, lab,
year), skiprows=[0])
df.columns = ct.LPR_MA_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
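# Minimal usage sketch (illustrative only): fetching one year of Shibor data
# needs network access to the source configured in tushare's ``cons`` module;
# every helper above returns None when the download or parsing fails.
if __name__ == '__main__':
    demo = shibor_data(2014)
    if demo is not None:
        print(demo.head())
    else:
        print('shibor_data(2014) returned None (download failed?)')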
| bsd-3-clause |
TinyOS-Camp/DDEA-DEV | Archive/[14_09_13] Code_Base/lib_bnlearn.py | 6 | 12617 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 24 19:24:11 2014
@author: NGO Quang Minh Khiem
@e-mail: [email protected]
"""
from __future__ import division # To force floating point division
import numpy as np
from pandas import DataFrame
# R libs
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
import pandas.rpy.common as com
from rpy2.robjects import pandas2ri
import networkx as nx
import matplotlib.pyplot as plt
#============================================================#
# Utility functions and Misc
#============================================================#
def write_to_file(filename,text):
with open(filename,'w') as f:
f.write(text)
# Close X11 window
def dev_off():
r['dev.off']()
#============================================================#
# Methods for Plotting
#============================================================#
# visualize graph from adjacence matrix r_graph
# for quick usage: set simple=True (by default)
# otherwise, function allows customize some properties of the graph
def nx_plot(r_graph, cols_names, simple=True, labels=None, graph_layout='shell',
node_size=1600, node_color='blue', node_alpha=0.3,
node_text_size=12,
edge_color='blue', edge_alpha=0.3, edge_tickness=1,
edge_text_pos=0.3,
text_font='sans-serif'):
#G = nx.Graph()
dg = nx.DiGraph()
edges = []
np_amat = np.asarray(bnlearn.amat(r_graph))
for ri in range(np_amat.shape[0]):
for ci in range(np_amat.shape[1]):
if np_amat[ri,ci] == 1:
#G.add_edge(cols_names[ri],cols_names[ci])
dg.add_edge(cols_names[ri],cols_names[ci])
edges.append((cols_names[ri],cols_names[ci]))
#import pdb;pdb.set_trace()
if simple:
if graph_layout=='spectral':
nx.draw_spectral(dg,font_size=node_text_size)
elif graph_layout=='random':
nx.draw_random(dg,font_size=node_text_size)
elif graph_layout=='circular':
nx.draw_circular(dg,font_size=node_text_size)
elif graph_layout=='spring':
nx.draw_spring(dg,font_size=node_text_size)
else:
nx.draw(dg,font_size=node_text_size)
else:
draw_graph(edges,directed=True, labels=labels, graph_layout=graph_layout,
node_size=node_size, node_color=node_color, node_alpha=node_alpha,
node_text_size=node_text_size,
edge_color=edge_color, edge_alpha=edge_alpha, edge_tickness=edge_tickness,
edge_text_pos=edge_text_pos,
text_font=text_font)
#nxlib.draw_graph(dg,labels=cols_names)
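# Illustrative usage (not in the original source): assuming ``bn`` is a
# structure learned with bnlearn (e.g. ``bn = bnlearn.gs(data_frame)``) and
# ``cols`` its column names:
#     nx_plot(bn, cols)                                   # quick shell layout
#     nx_plot(bn, cols, simple=False, graph_layout='spring')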
def nx_plot2(r_graph,cols_names,is_bnlearn=True):
G = nx.Graph()
dg = nx.DiGraph()
if is_bnlearn:
np_amat = np.asarray(bnlearn.amat(r_graph))
for ri in range(np_amat.shape[0]):
for ci in range(np_amat.shape[1]):
if np_amat[ri,ci] == 1:
G.add_edge(cols_names[ri],cols_names[ci])
dg.add_edge(cols_names[ri],cols_names[ci])
else:
np_amat = np.asarray(r_graph)
for ri in range(np_amat.shape[0]):
for ci in range(np_amat.shape[1]):
if np_amat[ri,ci] >= 0:
#G.add_weighted_edges_from([(cols_names[ri],cols_names[ci],{'weight': np_amat[ri,ci]})])
G.add_edge(cols_names[ri],cols_names[ci],weight=np_amat[ri,ci])
#dg.add_weighted_edges_from([(cols_names[ri],cols_names[ci],np_amat[ri,ci])])
#nx.draw(G,nx.shell_layout)
nx.draw(G)
#nxlib.draw_graph(dg,labels=cols_names)
# a more generic graph plotting function, using networkx lib
# graph is a list of edges
def draw_graph(graph, directed=True, labels=None, graph_layout='shell',
node_size=1600, node_color='blue', node_alpha=0.3,
node_text_size=12,
edge_color='blue', edge_alpha=0.3, edge_tickness=1,
edge_text_pos=0.3,
text_font='sans-serif'):
# create networkx graph
#G=nx.Graph()
if directed:
G = nx.DiGraph()
else:
G = nx.Graph()
# add edges
for edge in graph:
G.add_edge(edge[0], edge[1])
# these are different layouts for the network you may try
# shell seems to work best
if graph_layout == 'spring':
graph_pos=nx.spring_layout(G)
elif graph_layout == 'spectral':
graph_pos=nx.spectral_layout(G)
elif graph_layout == 'random':
graph_pos=nx.random_layout(G)
else:
graph_pos=nx.shell_layout(G)
# draw graph
nx.draw_networkx_nodes(G,graph_pos,node_size=node_size,
alpha=node_alpha, node_color=node_color)
nx.draw_networkx_edges(G,graph_pos,width=edge_tickness,
alpha=edge_alpha,edge_color=edge_color)
nx.draw_networkx_labels(G, graph_pos,font_size=node_text_size,
font_family=text_font)
"""
if labels is None:
labels = range(len(graph))
edge_labels = dict(zip(graph, labels))
"""
if labels is not None:
edge_labels = dict(zip(graph, labels))
nx.draw_networkx_edge_labels(G, graph_pos, edge_labels=edge_labels,
label_pos=edge_text_pos)
# show graph
plt.show()
#============================================================#
# bnlearn wrapper APIs
#============================================================#
###
# Construct list of arcs used for blacklisting/whitelisting
# arc list is a list of arcs. For example:
# arc_list =
# [['A','B'] , ['A','C']]
#
# return data frame in the following format
# from to
# 0 A B
# 1 A C
###
def construct_arcs_frame(arc_list):
data_frame = DataFrame(data=np.array(arc_list),columns=['from','to'])
return data_frame
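# Illustrative usage (not in the original source): blacklist two arcs before
# structure learning; ``data_frame`` is assumed to be a pandas DataFrame with
# columns 'A', 'B' and 'C'.
#     bl = construct_arcs_frame([['A', 'B'], ['A', 'C']])
#     bn = bnlearn.gs(data_frame, blacklist=bl)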
def print_bw_rules():
rules = """
============================================================
Blacklisting Rules:
-------------------
1. any arc blacklisted in one of its possible directions is never present in the graph.
if A-->B is blacklisted (but B-->A is not), A-->B and A--B are never
present in the graph (but not B-->A)
2. any arc blacklisted in both directions, as well as the corresponding
undirected arc, is never present in the graph.
B(A-->B,B-->A) => B(A--B)
Whitelisting Rules:
-------------------
1. arcs whitelisted in one direction only (i.e. A-->B is whitelisted but B-->A is not)
have the respective reverse arcs blacklisted,
and are always present in the graph.
W(A-->B) => B(B-->A,A--B)
2. arcs whitelisted in both directions (i.e. both A--> B and B-->A are whitelisted)
are present in the graph,
but their direction is set by the learning algorithm.
3. any arc whitelisted and blacklisted at the same time is assumed to be whitelisted,
and is thus removed from the blacklist.
============================================================
"""
print rules
def convert_pymat_to_rfactor(py_mat):
mat_shape = py_mat.shape
r_factor_vec = r.factor(py_mat)
r_factor_mat = r.matrix(r_factor_vec, nrow=mat_shape[1], byrow=True)
return np.array(r_factor_mat).reshape(mat_shape[0],mat_shape[1],order='C')
def construct_data_frame(data_mat,columns=[]):
if len(columns) == 0:
column_names = range(data_mat.shape[1])
else:
column_names = columns
return DataFrame(data=data_mat,columns=column_names)
"""
def py_bnlearn(data_frame,method='gs',blacklist=None, whitelist=None):
# For hill-climbing, the data must be real or factor
#
if method == 'hc':
bn_structure = bnlearn.hc(data_frame)
else:
bn_structure = bnlearn.gs(data_frame)
return bn_structure
"""
#============================================================#
# APIs related to bn_learn structure
#============================================================#
#=======================|
# bn structure and graph|
#=======================|
def acyclic(bn_structure):
return bool(bnlearn.acyclic(bn_structure)[0])
def amat(bn_structure):
return np.array(bnlearn.amat(bn_structure))
def py_get_amat(bn_structure):
return np.array(bnlearn.amat(bn_structure))
#=======================|
# Arcs |
#=======================|
def narcs(bn_structure):
return bnlearn.narcs(bn_structure)[0]
def arcs(bn_structure):
arcs = np.array(bnlearn.arcs(bn_structure))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
def directed_arcs(bn_structure):
arcs = np.array(bnlearn.directed_arcs(bn_structure))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
def undirected_arcs(bn_structure):
arcs = np.array(bnlearn.undirected_arcs(bn_structure))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
def incoming_arcs(bn_structure, node_name):
arcs = np.array(bnlearn.incoming_arcs(bn_structure, node_name))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
def outgoing_arcs(bn_structure, node_name):
arcs = np.array(bnlearn.outgoing_arcs(bn_structure, node_name))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
#=======================|
# Nodes |
#=======================|
def nnodes(bn_structure):
return bnlearn.nnodes(bn_structure)[0]
def degree(bn_structure, node_name):
return bnlearn.degree(bn_structure, node_name)[0]
def in_degree(bn_structure, node_name):
return bnlearn.in_degree(bn_structure, node_name)[0]
def out_degree(bn_structure, node_name):
return bnlearn.out_degree(bn_structure, node_name)[0]
def root_nodes(bn_structure):
return np.array(bnlearn.root_nodes(bn_structure))
def leaf_nodes(bn_structure):
return np.array(bnlearn.leaf_nodes(bn_structure))
def children(bn_structure, node_name):
return np.array(bnlearn.children(bn_structure, node_name))
def parents(bn_structure, node_name):
return np.array(bnlearn.parents(bn_structure, node_name))
def nbr(bn_structure, node_name):
return np.array(bnlearn.nbr(bn_structure, node_name))
#=======================|
# bn fit |
#=======================|
###
# To fit data to bn structure, the graph must be completely directed
###
def py_bn_fit(bn_structure,data_frame):
fit = bnlearn.bn_fit(bn_structure,data_frame)
return fit
def py_get_node_cond_mat(fit,node_indx):
"""
Each item in fit is a list vector with dimension attributes
fit[node_indx] has 4 attributes ['node', 'parents', 'children', 'prob']
"""
node_fit = fit[node_indx]
node = node_fit[0]
parents = node_fit[1]
children = node_fit[2]
prob = node_fit[3]
"""
prob is a vector Array type in R, which contains the conditional
probability table of this node.
prob is a (n_0 x n_1 x ... x n_parents) matrix, where each n_i is the number
of discrete values of each node in the list prob_dimnames
prob_dimnames contains the name of each dimension.
"""
prob_dimnames = np.array(prob.dimnames.names)
prob_factors = np.array(prob.dimnames)
prob_mat = np.array(prob)
#prob_frame = DataFrame(data=prob_mat[0],columns=prob_dimnames)
return prob_dimnames,prob_factors,prob_mat
def bn_fit_barchart(fit, node_idx):
print bnlearn.bn_fit_barchart(fit[node_idx])
def bn_fit_dotplot(fit, node_idx):
print bnlearn.bn_fit_dotplot(fit[node_idx])
#==========================================================================#
#==========================================================================#
#==========================================================================#
#============================================================#
# Use R bnlearn to learn the Bayes network structure
#============================================================#
### BN Learn
## load some R libs
r = robjects.r
utils = importr("utils")
bnlearn = importr("bnlearn")
#rgraphviz = importr("Rgraphviz")
pandas2ri.activate() ### this is important to seamlessly convert from pandas to R data frame
"""
a = com.load_data('learning.test')
#d = construct_data_frame(a)
gs = py_bnlearn(a)
amat = py_get_amat(gs)
#fit = py_bn_fit(gs,a)
""" | gpl-2.0 |
StudTeam6/competition | sw/airborne/test/math/compare_utm_enu.py | 77 | 2714 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import sys
import os
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", "../../../..")
sys.path.append(PPRZ_SRC + "/sw/lib/python")
from pprz_math.geodetic import *
from pprz_math.algebra import DoubleRMat, DoubleEulers, DoubleVect3
from math import radians, degrees, tan
import matplotlib.pyplot as plt
import numpy as np
# Origin at ENAC
UTM_EAST0 = 377349 # in m
UTM_NORTH0 = 4824583 # in m
UTM_ZONE0 = 31
ALT0 = 147.000 # in m
utm_origin = UtmCoor_d(north=UTM_NORTH0, east=UTM_EAST0, alt=ALT0, zone=UTM_ZONE0)
print("origin %s" % utm_origin)
lla_origin = utm_origin.to_lla()
ecef_origin = lla_origin.to_ecef()
ltp_origin = ecef_origin.to_ltp_def()
print(ltp_origin)
# convergence angle to "true north" is approx 1 deg here
earth_radius = 6378137.0
n = 0.9996 * earth_radius
UTM_DELTA_EAST = 500000.
dist_to_meridian = utm_origin.east - UTM_DELTA_EAST
conv = dist_to_meridian / n * tan(lla_origin.lat)
# or (middle meridian of UTM zone 31 is at 3deg)
#conv = atan(tan(lla_origin.lon - radians(3))*sin(lla_origin.lat))
print("approx. convergence angle (north error compared to meridian): %f deg" % degrees(conv))
# Rotation matrix to correct for "true north"
R = DoubleEulers(psi=-conv).to_rmat()
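# Descriptive note (not in the original script): R rotates a vector about the
# local vertical by -conv, so applying it to an ENU vector approximately
# compensates the meridian convergence between true north (ENU) and UTM grid
# north, which is why ``error_c`` below is expected to be much smaller than
# ``error``.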
# calculate ENU coordinates for 100 points in 100m distance
nb_points = 100
dist_points = 100
enu_res = np.zeros((nb_points, 2))
enu_res_c = np.zeros((nb_points, 2))
utm_res = np.zeros((nb_points, 2))
for i in range(0, nb_points):
utm = UtmCoor_d()
utm.north = i * dist_points + utm_origin.north
    utm.east = i * dist_points + utm_origin.east
utm.alt = utm_origin.alt
utm.zone = utm_origin.zone
#print(utm)
utm_res[i, 0] = utm.east - utm_origin.east
utm_res[i, 1] = utm.north - utm_origin.north
lla = utm.to_lla()
#print(lla)
ecef = lla.to_ecef()
enu = ecef.to_enu(ltp_origin)
enu_res[i, 0] = enu.x
enu_res[i, 1] = enu.y
enu_c = R * DoubleVect3(enu.x, enu.y, enu.z)
enu_res_c[i, 0] = enu_c.x
enu_res_c[i, 1] = enu_c.y
#print(enu)
dist = np.linalg.norm(utm_res, axis=1)
error = np.linalg.norm(utm_res - enu_res, axis=1)
error_c = np.linalg.norm(utm_res - enu_res_c, axis=1)
plt.figure(1)
plt.subplot(311)
plt.title("utm vs. enu")
plt.plot(enu_res[:, 0], enu_res[:, 1], 'g', label="ENU")
plt.plot(utm_res[:, 0], utm_res[:, 1], 'r', label="UTM")
plt.ylabel("y/north [m]")
plt.xlabel("x/east [m]")
plt.legend(loc='upper left')
plt.subplot(312)
plt.plot(dist, error, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error [m]")
plt.subplot(313)
plt.plot(dist, error_c, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error with north fix [m]")
plt.show()
| gpl-2.0 |
AlexRobson/nilmtk | nilmtk/stats/tests/test_locategoodsections.py | 5 | 4547 | #!/usr/bin/python
from __future__ import print_function, division
import unittest
from os.path import join
import numpy as np
import pandas as pd
from datetime import timedelta
from nilmtk.stats import GoodSections
from nilmtk.stats.goodsectionsresults import GoodSectionsResults
from nilmtk import TimeFrame, ElecMeter, DataSet
from nilmtk.datastore import HDFDataStore
from nilmtk.elecmeter import ElecMeterID
from nilmtk.tests.testingtools import data_dir
METER_ID = ElecMeterID(instance=1, building=1, dataset='REDD')
class TestLocateGoodSections(unittest.TestCase):
@classmethod
def setUpClass(cls):
filename = join(data_dir(), 'energy_complex.h5')
cls.datastore = HDFDataStore(filename)
ElecMeter.load_meter_devices(cls.datastore)
cls.meter_meta = cls.datastore.load_metadata('building1')['elec_meters'][METER_ID.instance]
@classmethod
def tearDownClass(cls):
cls.datastore.close()
def test_pipeline(self):
meter1 = ElecMeter(store=self.datastore, metadata=self.meter_meta,
meter_id=METER_ID)
# load co_test.h5
dataset = DataSet(join(data_dir(), 'co_test.h5'))
meter2 = dataset.buildings[1].elec.mains()
for meter in [meter1, meter2]:
for chunksize in [None, 2**10, 2**29]:
if chunksize is None:
kwargs = {}
else:
kwargs = {'chunksize': chunksize}
source_node = meter.get_source_node(**kwargs)
good_sections = GoodSections(source_node)
good_sections.run()
combined = good_sections.results.simple()
meter.clear_cache()
meter.good_sections(**kwargs)
meter.good_sections(**kwargs)
meter.clear_cache()
dataset.store.close()
def test_process_chunk(self):
MAX_SAMPLE_PERIOD = 10
metadata = {'device': {'max_sample_period': MAX_SAMPLE_PERIOD}}
# 0 1 2 3 4 5 6 7
secs = [0,10,20,30, 50,60, 100, 200,
# 8 9 10 11 12 13 14 15 16
250,260,270,280,290,300, 350,360,370]
index = pd.DatetimeIndex([pd.Timestamp('2011-01-01 00:00:00') +
timedelta(seconds=sec) for sec in secs])
df = pd.DataFrame(data=np.random.randn(len(index), 3), index=index,
columns=['a', 'b', 'c'])
df.timeframe = TimeFrame(index[0], index[-1])
df.look_ahead = pd.DataFrame()
locate = GoodSections()
locate.results = GoodSectionsResults(MAX_SAMPLE_PERIOD)
locate._process_chunk(df, metadata)
results = locate.results.combined()
self.assertEqual(len(results), 4)
self.assertEqual(results[0].timedelta.total_seconds(), 30)
self.assertEqual(results[1].timedelta.total_seconds(), 10)
self.assertEqual(results[2].timedelta.total_seconds(), 50)
self.assertEqual(results[3].timedelta.total_seconds(), 20)
# Now try splitting data into multiple chunks
timestamps = [
pd.Timestamp("2011-01-01 00:00:00"),
pd.Timestamp("2011-01-01 00:00:40"),
pd.Timestamp("2011-01-01 00:01:20"),
pd.Timestamp("2011-01-01 00:04:20"),
pd.Timestamp("2011-01-01 00:06:20")
]
for split_point in [[4, 6, 9, 17], [4, 10, 12, 17]]:
locate = GoodSections()
locate.results = GoodSectionsResults(MAX_SAMPLE_PERIOD)
df.results = {}
prev_i = 0
for j, i in enumerate(split_point):
cropped_df = df.iloc[prev_i:i]
cropped_df.timeframe = TimeFrame(timestamps[j],
timestamps[j+1])
try:
cropped_df.look_ahead = df.iloc[i:]
except IndexError:
cropped_df.look_ahead = pd.DataFrame()
prev_i = i
locate._process_chunk(cropped_df, metadata)
results = locate.results.combined()
self.assertEqual(len(results), 4)
self.assertEqual(results[0].timedelta.total_seconds(), 30)
self.assertEqual(results[1].timedelta.total_seconds(), 10)
self.assertEqual(results[2].timedelta.total_seconds(), 50)
self.assertEqual(results[3].timedelta.total_seconds(), 20)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
go-bears/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_cocoaagg.py | 70 | 8970 | from __future__ import division
"""
backend_cocoaagg.py
A native Cocoa backend via PyObjC in OSX.
Author: Charles Moad ([email protected])
Notes:
- Requires PyObjC (currently testing v1.3.7)
- The Tk backend works nicely on OSX. This code
primarily serves as an example of embedding a
matplotlib rendering context into a cocoa app
using a NSImageView.
"""
import os, sys
try:
import objc
except:
    print >>sys.stderr, 'The CocoaAgg backend requires PyObjC to be installed!'
print >>sys.stderr, ' (currently testing v1.3.7)'
sys.exit()
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backend_bases import FigureCanvasBase, FigureManagerBase
from backend_agg import FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
mplBundle = NSBundle.bundleWithPath_(os.path.dirname(__file__))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasCocoaAgg(thisFig)
return FigureManagerCocoaAgg(canvas, num)
def show():
for manager in Gcf.get_all_fig_managers():
manager.show()
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class FigureCanvasCocoaAgg(FigureCanvasAgg):
def draw(self):
FigureCanvasAgg.draw(self)
def blit(self, bbox):
pass
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
NibClassBuilder.extractClasses('Matplotlib.nib', mplBundle)
class MatplotlibController(NibClassBuilder.AutoBaseClass):
# available outlets:
# NSWindow plotWindow
# PlotView plotView
def awakeFromNib(self):
# Get a reference to the active canvas
NSApp().setDelegate_(self)
self.app = NSApp()
self.canvas = Gcf.get_active().canvas
self.plotView.canvas = self.canvas
self.canvas.plotView = self.plotView
self.plotWindow.setAcceptsMouseMovedEvents_(True)
self.plotWindow.makeKeyAndOrderFront_(self)
self.plotWindow.setDelegate_(self)#.plotView)
self.plotView.setImageFrameStyle_(NSImageFrameGroove)
self.plotView.image_ = NSImage.alloc().initWithSize_((0,0))
self.plotView.setImage_(self.plotView.image_)
# Make imageview first responder for key events
self.plotWindow.makeFirstResponder_(self.plotView)
# Force the first update
self.plotView.windowDidResize_(self)
def windowDidResize_(self, sender):
self.plotView.windowDidResize_(sender)
def windowShouldClose_(self, sender):
#NSApplication.sharedApplication().stop_(self)
self.app.stop_(self)
return objc.YES
def saveFigure_(self, sender):
p = NSSavePanel.savePanel()
if(p.runModal() == NSFileHandlingPanelOKButton):
self.canvas.print_figure(p.filename())
def printFigure_(self, sender):
op = NSPrintOperation.printOperationWithView_(self.plotView)
op.runOperation()
class PlotWindow(NibClassBuilder.AutoBaseClass):
pass
class PlotView(NibClassBuilder.AutoBaseClass):
def updatePlot(self):
w,h = self.canvas.get_width_height()
# Remove all previous images
for i in xrange(self.image_.representations().count()):
self.image_.removeRepresentation_(self.image_.representations().objectAtIndex_(i))
self.image_.setSize_((w,h))
brep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
(self.canvas.buffer_rgba(0,0),'','','',''), # Image data
w, # width
h, # height
8, # bits per sample
4, # components per pixel
True, # has alpha?
False, # is planar?
NSCalibratedRGBColorSpace, # color space
w*4, # row bytes
32) # bits per pixel
self.image_.addRepresentation_(brep)
self.setNeedsDisplay_(True)
def windowDidResize_(self, sender):
w,h = self.bounds().size
dpi = self.canvas.figure.dpi
self.canvas.figure.set_size_inches(w / dpi, h / dpi)
self.canvas.draw()
self.updatePlot()
def mouseDown_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseDown):
button = 1
else:
print >>sys.stderr, 'Unknown mouse event type:', type
button = -1
self.canvas.button_press_event(loc.x, loc.y, button)
self.updatePlot()
def mouseDragged_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
self.canvas.motion_notify_event(loc.x, loc.y)
self.updatePlot()
def mouseUp_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseUp):
button = 1
else:
print >>sys.stderr, 'Unknown mouse event type:', type
button = -1
self.canvas.button_release_event(loc.x, loc.y, button)
self.updatePlot()
def keyDown_(self, event):
self.canvas.key_press_event(event.characters())
self.updatePlot()
def keyUp_(self, event):
self.canvas.key_release_event(event.characters())
self.updatePlot()
class MPLBootstrap(NSObject):
# Loads the nib containing the PlotWindow and PlotView
def startWithBundle_(self, bundle):
#NSApplicationLoad()
if not bundle.loadNibFile_externalNameTable_withZone_('Matplotlib.nib', {}, None):
print >>sys.stderr, 'Unable to load Matplotlib Cocoa UI!'
sys.exit()
class FigureManagerCocoaAgg(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
try:
WMEnable('Matplotlib')
except:
# MULTIPLE FIGURES ARE BUGGY!
pass # If there are multiple figures we only need to enable once
#self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
# 'startWithBundle:',
# mplBundle,
# False)
def show(self):
# Load a new PlotWindow
self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
'startWithBundle:',
mplBundle,
False)
NSApplication.sharedApplication().run()
FigureManager = FigureManagerCocoaAgg
#### Everything below taken from PyObjC examples
#### This is a hack to allow python scripts to access
#### the window manager without running pythonw.
def S(*args):
return ''.join(args)
OSErr = objc._C_SHT
OUTPSN = 'o^{ProcessSerialNumber=LL}'
INPSN = 'n^{ProcessSerialNumber=LL}'
FUNCTIONS=[
# These two are public API
( u'GetCurrentProcess', S(OSErr, OUTPSN) ),
( u'SetFrontProcess', S(OSErr, INPSN) ),
# This is undocumented SPI
( u'CPSSetProcessName', S(OSErr, INPSN, objc._C_CHARPTR) ),
( u'CPSEnableForegroundOperation', S(OSErr, INPSN) ),
]
def WMEnable(name='Python'):
if isinstance(name, unicode):
name = name.encode('utf8')
mainBundle = NSBundle.mainBundle()
bPath = os.path.split(os.path.split(os.path.split(sys.executable)[0])[0])[0]
if mainBundle.bundlePath() == bPath:
return True
bndl = NSBundle.bundleWithPath_(objc.pathForFramework('/System/Library/Frameworks/ApplicationServices.framework'))
if bndl is None:
print >>sys.stderr, 'ApplicationServices missing'
return False
d = {}
objc.loadBundleFunctions(bndl, d, FUNCTIONS)
for (fn, sig) in FUNCTIONS:
if fn not in d:
print >>sys.stderr, 'Missing', fn
return False
err, psn = d['GetCurrentProcess']()
if err:
print >>sys.stderr, 'GetCurrentProcess', (err, psn)
return False
err = d['CPSSetProcessName'](psn, name)
if err:
print >>sys.stderr, 'CPSSetProcessName', (err, psn)
return False
err = d['CPSEnableForegroundOperation'](psn)
if err:
#print >>sys.stderr, 'CPSEnableForegroundOperation', (err, psn)
return False
err = d['SetFrontProcess'](psn)
if err:
print >>sys.stderr, 'SetFrontProcess', (err, psn)
return False
return True
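# Hedged usage sketch (not part of the original backend module). Assuming this
# file is importable as matplotlib.backends.backend_cocoaagg and that the
# 'CocoaAgg' backend name is registered with matplotlib, a script would select
# it roughly as follows:
#
#   import matplotlib
#   matplotlib.use('CocoaAgg')
#   import matplotlib.pyplot as plt
#   plt.plot([1, 2, 3])
#   plt.show()   # hands control to FigureManagerCocoaAgg.show() above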
| agpl-3.0 |
nvoron23/scipy | scipy/stats/kde.py | 8 | 18243 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
kde.integrate_gaussian(mean, cov) : float
Multiply pdf with a specified Gaussian and integrate over the whole
domain.
kde.integrate_box_1d(low, high) : float
Integrate pdf (1D only) between two bounds.
kde.integrate_box(low_bounds, high_bounds) : float
Integrate pdf over a rectangular space between low_bounds and
high_bounds.
kde.integrate_kde(other_kde) : float
Integrate two kernel density estimates multiplied together.
kde.pdf(points) : ndarray
Alias for ``kde.evaluate(points)``.
kde.logpdf(points) : ndarray
Equivalent to ``np.log(kde.evaluate(points))``.
kde.resample(size=None) : ndarray
Randomly sample a dataset from the estimated pdf.
kde.set_bandwidth(bw_method='scott') : None
Computes the bandwidth, i.e. the coefficient that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
.. versionadded:: 0.11.0
kde.covariance_factor : float
Computes the coefficient (`kde.factor`) that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
The default is `scotts_factor`. A subclass can overwrite this method
to provide a different method, or set it through a call to
`kde.set_bandwidth`.
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
...     "Measurement model, return two coupled measurements."
...     m1 = np.random.normal(size=n)
...     m2 = np.random.normal(scale=0.5, size=n)
...     return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=np.float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff*tdiff,axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError :
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
diff = self.dataset - mean
tdiff = dot(linalg.inv(sum_cov), diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / sqrt(linalg.det(2 * pi *
sum_cov)) / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
result /= sqrt(linalg.det(2 * pi * sum_cov)) * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
Notes
-----
See `gaussian_kde.evaluate` for more details; this method simply
returns ``np.log(gaussian_kde.evaluate(x))``.
"""
return np.log(self.evaluate(x))
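# Hedged 1-D usage sketch (illustrative only). It goes through the public
# scipy.stats import path rather than this module directly, since the relative
# `from . import mvn` above prevents running this file as a script:
#
#   import numpy as np
#   from scipy.stats import gaussian_kde
#   samples = np.random.normal(size=1000)
#   kde = gaussian_kde(samples, bw_method='silverman')
#   xs = np.linspace(-4, 4, 200)
#   density = kde(xs)                   # same as kde.evaluate(xs)
#   mass = kde.integrate_box_1d(-1, 1)  # probability mass between -1 and 1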
| bsd-3-clause |
gengliangwang/spark | python/pyspark/pandas/data_type_ops/datetime_ops.py | 1 | 3349 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import warnings
from typing import TYPE_CHECKING, Union
from pyspark.sql import functions as F
from pyspark.sql.types import TimestampType
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.data_type_ops.base import DataTypeOps
from pyspark.pandas.typedef import as_spark_type
if TYPE_CHECKING:
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
class DatetimeOps(DataTypeOps):
"""
The class for binary operations of pandas-on-Spark objects with spark type: TimestampType.
"""
@property
def pretty_name(self) -> str:
return 'datetimes'
def sub(self, left, right) -> Union["Series", "Index"]:
# Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's
# behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.
msg = (
"Note that there is a behavior difference of timestamp subtraction. "
"The timestamp subtraction returns an integer in seconds, "
"whereas pandas returns 'timedelta64[ns]'."
)
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, TimestampType):
warnings.warn(msg, UserWarning)
return left.astype("long") - right.astype("long")
elif isinstance(right, datetime.datetime):
warnings.warn(msg, UserWarning)
return left.astype("long").spark.transform(
lambda scol: scol - F.lit(right).cast(as_spark_type("long"))
)
else:
raise TypeError("datetime subtraction can only be applied to datetime series.")
def rsub(self, left, right) -> Union["Series", "Index"]:
# Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's
# behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.
msg = (
"Note that there is a behavior difference of timestamp subtraction. "
"The timestamp subtraction returns an integer in seconds, "
"whereas pandas returns 'timedelta64[ns]'."
)
if isinstance(right, datetime.datetime):
warnings.warn(msg, UserWarning)
return -(left.astype("long")).spark.transform(
lambda scol: scol - F.lit(right).cast(as_spark_type("long"))
)
else:
raise TypeError("datetime subtraction can only be applied to datetime series.")
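# Hedged behavioral sketch (illustrative; assumes a running Spark session and
# the public pandas-on-Spark API, which routes through DatetimeOps above):
#
#   import datetime
#   import pyspark.pandas as ps
#   s = ps.Series([datetime.datetime(2021, 1, 1), datetime.datetime(2021, 1, 2)])
#   s - datetime.datetime(2021, 1, 1)
#   # expected: integer seconds (0, 86400) plus the UserWarning defined above,
#   # whereas native pandas would return timedelta64[ns] values.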
| apache-2.0 |
zorroblue/scikit-learn | examples/preprocessing/plot_scaling_importance.py | 45 | 5269 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Importance of Feature Scaling
=========================================================
Feature scaling through standardization (or Z-score normalization)
can be an important preprocessing step for many machine learning
algorithms. Standardization involves rescaling the features such
that they have the properties of a standard normal distribution
with a mean of zero and a standard deviation of one.
While many algorithms (such as SVM, K-nearest neighbors, and logistic
regression) require features to be normalized, intuitively we can
think of Principal Component Analysis (PCA) as being a prime example
of when normalization is important. In PCA we are interested in the
components that maximize the variance. If one component (e.g. human
height) varies less than another (e.g. weight) because of their
respective scales (meters vs. kilos), PCA might determine that the
direction of maximal variance more closely corresponds with the
'weight' axis, if those features are not scaled. As a change in
height of one meter can be considered much more important than the
change in weight of one kilogram, this is clearly incorrect.
To illustrate this, PCA is performed comparing the use of data with
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` applied,
to unscaled data. The results are visualized and a clear difference noted.
Looking at the 1st principal component of the unscaled set, it can be seen
that feature #13 dominates the direction, being a whole two orders of
magnitude above the other features. This is contrasted when observing
the principal component for the scaled version of the data. In the scaled
version, the orders of magnitude are roughly the same across all the features.
The dataset used is the Wine Dataset available at UCI. This dataset
has continuous features that are heterogeneous in scale due to differing
properties that they measure (i.e. alcohol content and malic acid).
The transformed data is then used to train a naive Bayes classifier, and a
clear difference in prediction accuracies is observed wherein the dataset
which is scaled before PCA vastly outperforms the unscaled version.
"""
from __future__ import print_function
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from sklearn.pipeline import make_pipeline
print(__doc__)
# Code source: Tyler Lanigan <[email protected]>
# Sebastian Raschka <[email protected]>
# License: BSD 3 clause
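# A minimal sketch of the standardization described in the module docstring,
# assuming a plain NumPy array ``x`` of shape (n_samples, n_features);
# StandardScaler applies the equivalent per-feature transform:
#
#   z = (x - x.mean(axis=0)) / x.std(axis=0)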
RANDOM_STATE = 42
FIG_SIZE = (10, 7)
features, target = load_wine(return_X_y=True)
# Make a train/test split using 30% test size
X_train, X_test, y_train, y_test = train_test_split(features, target,
test_size=0.30,
random_state=RANDOM_STATE)
# Fit to data and predict using pipelined GNB and PCA.
unscaled_clf = make_pipeline(PCA(n_components=2), GaussianNB())
unscaled_clf.fit(X_train, y_train)
pred_test = unscaled_clf.predict(X_test)
# Fit to data and predict using pipelined scaling, GNB and PCA.
std_clf = make_pipeline(StandardScaler(), PCA(n_components=2), GaussianNB())
std_clf.fit(X_train, y_train)
pred_test_std = std_clf.predict(X_test)
# Show prediction accuracies in scaled and unscaled data.
print('\nPrediction accuracy for the normal test dataset with PCA')
print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test)))
print('\nPrediction accuracy for the standardized test dataset with PCA')
print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test_std)))
# Extract PCA from pipeline
pca = unscaled_clf.named_steps['pca']
pca_std = std_clf.named_steps['pca']
# Show first principal components
print('\nPC 1 without scaling:\n', pca.components_[0])
print('\nPC 1 with scaling:\n', pca_std.components_[0])
# Scale and use PCA on X_train data for visualization.
scaler = std_clf.named_steps['standardscaler']
X_train_std = pca_std.transform(scaler.transform(X_train))
# visualize standardized vs. untouched dataset with PCA performed
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=FIG_SIZE)
for l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):
ax1.scatter(X_train[y_train == l, 0], X_train[y_train == l, 1],
color=c,
label='class %s' % l,
alpha=0.5,
marker=m
)
for l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):
ax2.scatter(X_train_std[y_train == l, 0], X_train_std[y_train == l, 1],
color=c,
label='class %s' % l,
alpha=0.5,
marker=m
)
ax1.set_title('Training dataset after PCA')
ax2.set_title('Standardized training dataset after PCA')
for ax in (ax1, ax2):
ax.set_xlabel('1st principal component')
ax.set_ylabel('2nd principal component')
ax.legend(loc='upper right')
ax.grid()
plt.tight_layout()
plt.show()
| bsd-3-clause |
zorroblue/scikit-learn | sklearn/gaussian_process/gpr.py | 9 | 20571 | """Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
from sklearn.utils.deprecation import deprecated
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard scikit-learn estimator API,
GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Read more in the :ref:`User Guide <gaussian_process>`.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations.
This can also prevent a potential numerical issue during fitting, by
ensuring that the calculated values form a positive definite matrix.
If an array is passed, it must have the same number of entries as the
data used for fitting and is used as datapoint-dependent noise level.
Note that this is equivalent to adding a WhiteKernel with c=alpha.
Allowing the noise level to be specified directly as a parameter is mainly
for convenience and for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y : boolean, optional (default: False)
Whether the target values y are normalized, i.e., the mean of the
observed target values become zero. This parameter should be set to
True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle;
normalization is thus disabled per default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, optional (default: None)
The generator used to initialize the centers. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_ : array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
@property
@deprecated("Attribute rng was deprecated in version 0.19 and "
"will be removed in 0.21.")
def rng(self):
return self._rng
@property
@deprecated("Attribute y_train_mean was deprecated in version 0.19 and "
"will be removed in 0.21.")
def y_train_mean(self):
return self._y_train_mean
def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self._y_train_mean
else:
self._y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
self.L_ = cholesky(K, lower=True) # Line 2
# self.L_ changed, self._K_inv needs to be recomputed
self._K_inv = None
except np.linalg.LinAlgError as exc:
exc.args = ("The kernel, %s, is not returning a "
"positive definite matrix. Try gradually "
"increasing the 'alpha' parameter of your "
"GaussianProcessRegressor estimator."
% self.kernel_,) + exc.args
raise
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, its standard
deviation (return_std=True) or covariance (return_cov=True) can also be returned.
Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior
if self.kernel is None:
kernel = (C(1.0, constant_value_bounds="fixed") *
RBF(1.0, length_scale_bounds="fixed"))
else:
kernel = self.kernel
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = kernel(X)
return y_mean, y_cov
elif return_std:
y_var = kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
y_mean = self._y_train_mean + y_mean # undo normal.
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# cache result of K_inv computation
if self._K_inv is None:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T,
np.eye(self.L_.shape[0]))
self._K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ij,ij->i",
np.dot(K_trans, self._K_inv), K_trans)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the
random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
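if __name__ == "__main__":
    # Hedged, minimal demo (not part of the original module): fit the GPR on a
    # toy 1-D sine curve and query the posterior mean and standard deviation.
    # The kernel choice and alpha value are illustrative assumptions only.
    X = np.linspace(0, 5, 20)[:, np.newaxis]
    y = np.sin(X).ravel()
    gpr = GaussianProcessRegressor(kernel=C(1.0) * RBF(1.0), alpha=1e-2)
    gpr.fit(X, y)
    y_mean, y_std = gpr.predict(X, return_std=True)
    print("log-marginal likelihood: %.3f" % gpr.log_marginal_likelihood_value_)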
| bsd-3-clause |
thjashin/tensorflow | tensorflow/python/estimator/inputs/pandas_io_test.py | 3 | 8612 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
def print_debug_info(module_pd, has_pandas, msg):
if module_pd is None:
print('HAS_PANDAS {} and module pd is None. Msg {}'.format(has_pandas, msg))
else:
has_data_frame = hasattr(module_pd, 'DataFrame')
print('HAS_PANDAS {} and hasattr(pd, "DataFrame") {}. Msg {}'.format(
has_pandas, has_data_frame, msg))
if not has_data_frame:
print('symbols in pd {}'.format(dir(module_pd)))
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
print_debug_info(pd, HAS_PANDAS, 'import statement')
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
print_debug_info(None, HAS_PANDAS, 'import statement')
except ImportError:
HAS_PANDAS = False
print_debug_info(None, HAS_PANDAS, 'import statement')
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
print_debug_info(pd, HAS_PANDAS, 'in test case')
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
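# Hedged usage sketch outside the test harness (assumes TensorFlow and pandas
# are installed; it mirrors the calls exercised by the tests above):
#
#   import numpy as np
#   import pandas as pd
#   from tensorflow.python.estimator.inputs import pandas_io
#   x = pd.DataFrame({'a': np.arange(4), 'b': np.arange(32, 36)})
#   y = pd.Series(np.arange(-32, -28))
#   input_fn = pandas_io.pandas_input_fn(
#       x, y, batch_size=2, shuffle=False, num_epochs=1)
#   features, target = input_fn()  # tensors fed by queue runners in a session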
| apache-2.0 |
gwpy/gwpy.github.io | docs/0.7.5/plotter/colors-1.py | 7 | 1123 | from __future__ import division
import numpy
from matplotlib import (pyplot, rcParams)
from matplotlib.colors import to_hex
from gwpy.plotter import colors
rcParams.update({
'text.usetex': False,
'font.size': 15
})
th = numpy.linspace(0, 2*numpy.pi, 512)
names = [
'gwpy:geo600',
'gwpy:kagra',
'gwpy:ligo-hanford',
'gwpy:ligo-india',
'gwpy:ligo-livingston',
'gwpy:virgo',
]
fig = pyplot.figure(figsize=(5, 2))
ax = fig.gca()
ax.axis('off')
for j, name in enumerate(sorted(names)):
c = str(to_hex(name))
v_offset = -(j / len(names))
ax.plot(th, .1*numpy.sin(th) + v_offset, color=c)
ax.annotate("{!r}".format(name), (0, v_offset), xytext=(-1.5, 0),
ha='right', va='center', color=c,
textcoords='offset points', family='monospace')
ax.annotate("{!r}".format(c), (2*numpy.pi, v_offset), xytext=(1.5, 0),
ha='left', va='center', color=c,
textcoords='offset points', family='monospace')
fig.subplots_adjust(**{'bottom': 0.0, 'left': 0.54,
'right': 0.78, 'top': 1})
pyplot.show() | gpl-3.0 |
JohnUrban/fast5tools | bin/fast5plot.py | 1 | 5192 | #!/usr/bin/env python2.7
## JOHN URBAN (2015,2016)
import h5py, os, sys
import cStringIO as StringIO
from Bio import SeqIO
from fast5tools.f5class import *
from fast5tools.f5ops import *
import argparse
from glob import glob
import matplotlib
## may need following line for remote jobs (e.g. submitting batch scripts)
##matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
##from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
#################################################
## Argument Parser
#################################################
parser = argparse.ArgumentParser(description = """
Given path(s) to fast5 file(s) and/or directories of fast5s, return desired plot given x and y.
1 = base_info_name
2 = molecule length
3 = has complement
4 = has 2d
5 = 2d seq len
6 = template seq len
7 = complement seq len
8 = 2d mean q score
9 = template mean q score
10 = complement mean q score
11 = num input events
12 = num template events
13 = num complement events
14 = num called template events
15 = num called complement events
16 = num skips in template
17 = num skips in complement
18 = fast5 filename (path as given)
19 = fast5 filename (absolute path)
John Urban (2015, 2016)
""", formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument('fast5', metavar='fast5', nargs='+',
type= str,
help='''Paths to as many fast5 files and/or directories filled with fast5 files as you want.
Assumes all fast5 files have '.fast5' extension.
If inside dir of dirs with .fast5 files, then can just do "*" to get all files from all dirs.''')
parser.add_argument('-x', '--x', type=int, default=5,
help='''Provide integer corresponding to what information is on x-axis.''')
parser.add_argument('-y', '--y', type=int, default=8,
help='''Provide integer corresponding to what information is on y-axis.''')
parser.add_argument('-t', '--title', type=str, default=None,
help='''Provide title.''')
parser.add_argument('--notarlite', action='store_true', default=False, help='''The default method (called tarlite) extracts 1 file from a given tarchive at a time, processes, and deletes it.
This option says to turn tarlite off, resulting in extracting the entire tarchive before proceeding (and finally deleting it).
It is possible that --notarlite is faster, but at the expense of exceeding file number limits or disk storage quotas.
Nonetheless, the difference in speed is a lot smaller than the difference in space needed.
For example, not using tarlite will require >2*tarchive amount of disk space (i.e. the tar.gz and its extracted contents).
The tarlite method only requires the disk space already taken by the tarchive and enough for 1 additional file at a time.
A corollary is that tarlite just needs to be allowed to form 1 (or a few) files compared to what could be thousands to millions.
''')
parser.add_argument('--tarlite', action='store_true', default=False, help='''This legacy option is outdated.
However, it is kept here to avoid breaking pipelines that make use of it.
The tarlite approach is now default. Specifying this will not change that default behavior.
It will just prevent pipelines from breaking.
However, not specifying this will still also result in the tarlite approach.
Use --notarlite to turn it off.''')
args = parser.parse_args()
#################################################
## deal with some of the arguments
#################################################
num_f5cmds = len(f5fxn.keys())
safe_keys = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]
assert args.x in safe_keys
assert args.y in safe_keys
names = {}
names[2] = 'Molecule Length'
names[3] = 'Has Complement'
names[4] = 'Has 2D'
names[5] = '2D SeqLen'
names[6] = 'Template SeqLen'
names[7] = 'Complement SeqLen'
names[8] = '2D Mean q-score'
names[9] = 'Template Mean q-score'
names[10] = 'Complement Mean q-score'
names[11] = 'Number of Input Events'
names[12] = 'Number of Template Events'
names[13] = 'Number of Complement Events'
names[14] = 'Number of Called Template Events'
names[15] = 'Number of Called Complement Events'
names[16] = 'Number of Skips in Template'
names[17] = 'Number of Skips in Complement'
def get_fast5_data(f5cmd, f5):
try:
return float(f5fxn[f5cmd](f5))
except:
return None
def make_title(x, y, names):
return names[x] + " Vs. " + names[y]
#################################################
#### EXECUTE @@@@@@@@@@@@
#################################################
##TODO:
## Also make plotting from fast5totable files
if __name__ == "__main__":
if args.title is None:
args.title = make_title(args.x, args.y, names=names)
x = []
y = []
for f5 in Fast5List(args.fast5, keep_tar_footprint_small=(not args.notarlite)):
x.append( get_fast5_data(args.x, f5) )
y.append( get_fast5_data(args.y, f5) )
print x
print y
## will need to process those with "-"
plt.title(args.title)
plt.xlabel(names[args.x])
plt.ylabel(names[args.y])
plt.scatter(x,y)
plt.show()
| mit |
choderalab/openpathsampling | examples/alanine_dipeptide_mstis/alatools.py | 4 | 7508 | import matplotlib.pyplot as plt
import openpathsampling as paths
import numpy as np
import math
class CVSphere(paths.Volume):
"""
    Defines a sphere in multi-CV space with a center and a radius per CV
"""
def __init__(self, cvs, center, radius):
self.cvs = cvs
self.center = center
self.radius = radius
assert(len(cvs) == len(center) == len(radius))
    def __call__(self, snapshot):
        return math.sqrt(sum(
            map(
                lambda cv: cv(snapshot) ** 2,
                self.cvs
            )
        ))
def __and__(self, other):
if isinstance(other, paths.EmptyVolume):
return self
elif isinstance(other, paths.FullVolume):
return other
elif isinstance(other, CVSphere):
dc = np.linalg.norm(np.array(self.center) - np.array(other.center))
# use triangle inequality
if self.radius >= dc + other.radius:
# other is completely in self
return self
elif other.radius >= dc + self.radius:
# self is completely in other
return other
return paths.UnionVolume(
self, other
)
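# Illustrative construction (hypothetical collective variables; phi/psi would be
# openpathsampling CVs defined in the notebook that imports this module):
#   vol = CVSphere(cvs=[phi, psi], center=[-2.6, 2.7], radius=[0.3, 0.3])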
class TwoCVSpherePlot(object):
def __init__(
self, cvs, states, state_centers,
interface_levels, ranges=None):
self.cvs = cvs
self.states = states
self.state_centers = state_centers
self.interface_levels = interface_levels
self._ax1 = 0
self._ax2 = 1
self.figsize = (6, 6)
self.periodic = [math.pi] * len(cvs)
self.zoom = 180 / math.pi
if ranges is None:
self.ranges = ((-180, 180), (-180, 180))
else:
self.ranges = ranges
self.color_fnc = lambda x: (x, x, 0.6)
self.color_fnc = lambda x: (x * 0.5 + 0.4, 0.5 * x + 0.4, 1 * x, 1.0)
def select_axis(self, ax1, ax2):
self._ax1 = ax1
self._ax2 = ax2
def new(self, figsize=None):
if figsize is None:
figsize = self.figsize
plt.figure(figsize=figsize)
def main(self):
n_states = len(self.states)
centers = self.state_centers
levels = self.interface_levels
labels = [state.name[0] for state in self.states]
periodic = (self.periodic[self._ax1], self.periodic[self._ax2])
mirror = [
[-1, 0, 1] if p is not None else [0]
for p in periodic
]
# replace None with zero
periodic = [p or 0 for p in periodic]
plt.plot(
[x[self._ax1] for x in centers],
[x[self._ax2] for x in centers],
'ko')
fig = plt.gcf()
all_levels = sorted(
list(set(
sum(levels, [])
)),
reverse=True
) + [0]
plt.xlabel(self.cvs[self._ax1].name)
plt.ylabel(self.cvs[self._ax2].name)
max_level = max(all_levels)
zoom = self.zoom
for level in all_levels:
for colored in [True, False]:
for state in range(n_states):
center = centers[state]
center = (center[self._ax1], center[self._ax2])
name = labels[state]
if level == 0:
plt.annotate(
name,
xy=center,
xytext=(center[0]+10 + 1, center[1] - 1),
fontsize=20,
color='k'
)
plt.annotate(
name,
xy=center,
xytext=(center[0]+10, center[1]),
fontsize=20,
color='w'
)
if level in levels[state]:
for xp in mirror[0]:
for yp in mirror[1]:
if colored:
circle = plt.Circle(
(center[0] + xp * periodic[0] * zoom * 2,
center[1] + yp * periodic[1] * zoom * 2),
level,
color='w'
)
fig.gca().add_artist(circle)
else:
l = 1.0 * level / max_level
circle = plt.Circle(
(center[0] + xp * periodic[0] * zoom * 2,
center[1] + yp * periodic[1] * zoom * 2),
level - 1,
color=self.color_fnc(l)
)
fig.gca().add_artist(circle)
# plt.axis((-180,180,-180,180))
plt.axis('equal')
plt.xlim(*self.ranges[0])
plt.ylim(*self.ranges[1])
def _cvlines(self, snapshots):
cvs = self.cvs
all_points = [cv(snapshots) for cv in cvs]
ret = []
first = 0
if len(snapshots) > 1:
for d in range(1, len(snapshots)):
flip = False
for c in range(len(cvs)):
if self.periodic[c] is not None and self._periodicflip(
all_points[c][d],
all_points[c][d-1],
self.periodic[c]
):
flip = True
if flip:
ret.append([all_points[c][first:d] for c in range(len(cvs))])
first = d
ret.append([all_points[c][first:d+1] for c in range(len(cvs))])
return ret
@staticmethod
def _periodicflip(val1, val2, period):
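        # Heuristic wrap detection: the inequality below is equivalent to
        # |val1 - val2| > period / sqrt(2), i.e. the jump is treated as a
        # crossing of the periodic boundary.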
return (period**2 - (val1 - val2)**2) < (val1 - val2)**2
def add_trajectory(self, trajectory, line=True, points=True):
angles = self._cvlines(trajectory)
zoom = self.zoom
for angle in angles:
if points:
plt.plot(
zoom * np.array(angle[self._ax1])[:],
zoom * np.array(angle[self._ax2])[:],
'ko',
linewidth=0.5)
if line:
plt.plot(
zoom * np.array(angle[self._ax1])[:],
zoom * np.array(angle[self._ax2])[:],
'k-',
linewidth=0.5)
def add_snapshot(self, snapshot, label=None):
zoom = self.zoom
angle = [cv(snapshot) for cv in self.cvs]
x = zoom * np.array(angle[self._ax1])
y = zoom * np.array(angle[self._ax2])
plt.plot(
x, y,
'w+',
mew=5, ms=14)
plt.plot(
x, y,
'k+',
mew=3, ms=12)
if label is not None:
plt.annotate(
label,
xy=(x, y),
xytext=(x + 6, y + 4),
fontsize=12,
color='w'
)
plt.annotate(
label,
xy=(x, y),
xytext=(x + 5, y + 5),
fontsize=12,
color='k'
) | lgpl-2.1 |
platinhom/ManualHom | Coding/Python/scipy-html-0.16.1/generated/scipy-stats-genextreme-1.py | 1 | 1111 | from scipy.stats import genextreme
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
# Calculate a few first moments:
c = -0.1
mean, var, skew, kurt = genextreme.stats(c, moments='mvsk')
# Display the probability density function (``pdf``):
x = np.linspace(genextreme.ppf(0.01, c),
genextreme.ppf(0.99, c), 100)
ax.plot(x, genextreme.pdf(x, c),
'r-', lw=5, alpha=0.6, label='genextreme pdf')
# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.
# Freeze the distribution and display the frozen ``pdf``:
rv = genextreme(c)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
# Check accuracy of ``cdf`` and ``ppf``:
vals = genextreme.ppf([0.001, 0.5, 0.999], c)
np.allclose([0.001, 0.5, 0.999], genextreme.cdf(vals, c))
# True
# Generate random numbers:
r = genextreme.rvs(c, size=1000)
# And compare the histogram:
ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
| gpl-2.0 |
vincentchoqueuse/parametrix | examples/ex_bayesian_linear_estimation_MC.py | 1 | 1094 | import numpy as np
import scipy.linalg as lg
import matplotlib.pyplot as plt
from parametrix.monte_carlo.estimators import MC_Simulations_MSE
from parametrix.bayesian_linear_model.signal_models import M_Bayesian_L
from parametrix.bayesian_linear_model.estimators import E_Bayesian_L
from parametrix.bayesian_linear_model.statistics import S_x_Bayesian_L
""" This scripts shows the evolution of the MSE versus signal length for 2 linear estimators:
* Exact Least Square estimator (LSE),
* Least Square estimator (LSE) with model mismatch.
The Cramer Rao is also plotted for comparison. """
H=np.array([[1,2],[3,4],[1,6]])
m_x=np.array([3,5])
C_x=np.array([[1,0.2],[0.2,1]])
C_w=0.1*np.array([[1,0.2,0.05],[0.2,0.5,0.02],[0.05,0.02,0.9]])
#signal
signal=M_Bayesian_L(H,m_x,C_x,C_w)
#estimator
estimator=E_Bayesian_L(H,m_x,C_x,C_w,name="Bayesian estimator")
#statistic
statistic=S_x_Bayesian_L()
mc=MC_Simulations_MSE("SNR",np.arange(0,100,10),estimator,statistic_list=[statistic],short_param_names="x")
output=mc.trials(signal,nb_trials=1000,verbose=1,plot=1)
plt.show()
| bsd-3-clause |
eg-zhang/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
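    # for the canonical max-margin hyperplane the half-width of the margin
    # is 1 / ||w||, which is what the next line computes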
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
Nyker510/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 253 | 4158 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
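# For reference, the active (uncommented) settings above define
# 3 (max_df) * 2 (ngram_range) * 2 (alpha) * 2 (penalty) = 24 parameter
# combinations, each of which is fitted once per cross-validation fold.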
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
DCSaunders/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 8 | 6001 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
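# Illustrative use (hypothetical frame; int/float/bool columns are cast to float):
#   df = pd.DataFrame({'a': [1, 2], 'b': [True, False]})
#   extract_pandas_data(df)  # -> array([[1., 1.], [2., 0.]])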
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
def pandas_input_fn(x, y=None, batch_size=128, num_epochs=None, shuffle=True,
queue_capacity=1000, num_threads=1, target_column='target',
index_column='index'):
"""Returns input function that would feed pandas DataFrame into the model.
  Note: If y's index doesn't match x's index an exception will be raised.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object.
batch_size: int, size of batches to return.
    num_epochs: int, number of epochs to iterate over data. If `None` will
      run indefinitely.
    shuffle: bool, whether to shuffle the queue. Please make sure you don't shuffle at
prediction time.
queue_capacity: int, size of queue to accumulate.
num_threads: int, number of threads used for reading and enqueueing.
target_column: str, used to pack `y` into `x` DataFrame under this column.
    index_column: str, name of the feature returned with the index.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `target_column` column is already in `x` DataFrame.
"""
def input_fn():
"""Pandas input function."""
if y is not None:
if target_column in x:
raise ValueError('Found already column \'%s\' in x, please change '
'target_column to something else. Current columns '
'in x: %s', target_column, x.columns)
if not np.array_equal(x.index, y.index):
        raise ValueError('Index for x and y are mismatched; this will lead '
'to missing values. Please make sure they match or '
'use .reset_index() method.\n'
'Index for x: %s\n'
'Index for y: %s\n', x.index, y.index)
x[target_column] = y
queue = feeding_functions.enqueue_data(
x, queue_capacity, shuffle=shuffle, num_threads=num_threads,
enqueue_size=batch_size, num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
features = dict(zip([index_column] + list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
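# Illustrative call (hypothetical data; `df` is a pandas DataFrame and `labels`
# a pandas Series sharing df's index):
#   input_fn = pandas_input_fn(df, labels, batch_size=32, num_epochs=1,
#                              shuffle=False)
#   features, target = input_fn()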
| apache-2.0 |
CallaJun/hackprince | indico/matplotlib/tests/test_dviread.py | 15 | 1788 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from nose.tools import assert_equal
import matplotlib.dviread as dr
import os.path
original_find_tex_file = dr.find_tex_file
def setup():
dr.find_tex_file = lambda x: x
def teardown():
dr.find_tex_file = original_find_tex_file
def test_PsfontsMap():
filename = os.path.join(
os.path.dirname(__file__),
'baseline_images', 'dviread', 'test.map')
fontmap = dr.PsfontsMap(filename)
# Check all properties of a few fonts
for n in [1, 2, 3, 4, 5]:
key = 'TeXfont%d' % n
entry = fontmap[key]
assert_equal(entry.texname, key)
assert_equal(entry.psname, 'PSfont%d' % n)
if n not in [3, 5]:
assert_equal(entry.encoding, 'font%d.enc' % n)
elif n == 3:
assert_equal(entry.encoding, 'enc3.foo')
# We don't care about the encoding of TeXfont5, which specifies
# multiple encodings.
if n not in [1, 5]:
assert_equal(entry.filename, 'font%d.pfa' % n)
else:
assert_equal(entry.filename, 'font%d.pfb' % n)
if n == 4:
assert_equal(entry.effects, {'slant': -0.1, 'extend': 2.2})
else:
assert_equal(entry.effects, {})
# Some special cases
entry = fontmap['TeXfont6']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont7']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, 'font7.enc')
entry = fontmap['TeXfont8']
assert_equal(entry.filename, 'font8.pfb')
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont9']
assert_equal(entry.filename, '/absolute/font9.pfb')
| lgpl-3.0 |
UASLab/ImageAnalysis | scripts/archive/6b-delaunay3.py | 1 | 10722 | #!/usr/bin/python
import sys
sys.path.insert(0, "/usr/local/opencv3/lib/python2.7/site-packages/")
import argparse
import commands
import cPickle as pickle
import cv2
import fnmatch
import itertools
#import json
import math
import matplotlib.pyplot as plt
import numpy as np
import os.path
from progress.bar import Bar
import scipy.spatial
sys.path.append('../lib')
import Matcher
import Pose
import ProjectMgr
import SRTM
import transformations
parser = argparse.ArgumentParser(description='Compute Delauney triangulation of matches.')
parser.add_argument('--project', required=True, help='project directory')
parser.add_argument('--ground', type=float, help='ground elevation')
parser.add_argument('--depth', action='store_const', const=True,
help='generate 3d surface')
parser.add_argument('--steps', default=25, type=int, help='grid steps')
args = parser.parse_args()
# project the estimated uv coordinates for the specified image and ned
# point
def compute_feature_uv(K, image, ned):
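    # Pinhole-model sketch: uvh = K * [R | t] * [n, e, d, 1]^T, and dividing by
    # the homogeneous coordinate uvh[2] gives pixel coordinates (lens distortion
    # is applied separately by redistort below).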
if image.PROJ is None:
rvec, tvec = image.get_proj_sba()
R, jac = cv2.Rodrigues(rvec)
image.PROJ = np.concatenate((R, tvec), axis=1)
PROJ = image.PROJ
uvh = K.dot( PROJ.dot( np.hstack((ned, 1.0)) ).T )
uvh /= uvh[2]
uv = np.array( [ np.squeeze(uvh[0,0]), np.squeeze(uvh[1,0]) ] )
return uv
def redistort(u, v, dist_coeffs, K):
fx = K[0,0]
fy = K[1,1]
cx = K[0,2]
cy = K[1,2]
x = (u - cx) / fx
y = (v - cy) / fy
#print [x, y]
k1, k2, p1, p2, k3 = dist_coeffs
# Compute radius^2
r2 = x**2 + y**2
r4, r6 = r2**2, r2**3
# Compute tangential distortion
dx = 2*p1*x*y + p2*(r2 + 2*x*x)
dy = p1*(r2 + 2*y*y) + 2*p2*x*y
# Compute radial factor
Lr = 1.0 + k1*r2 + k2*r4 + k3*r6
ud = Lr*x + dx
vd = Lr*y + dy
return ud * fx + cx, vd * fy + cy
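# Illustrative use of redistort() (hypothetical pixel values; in this script the
# calibration K and dist_coeffs come from proj.cam further below):
#   ud, vd = redistort(640.0, 480.0, proj.cam.get_dist_coeffs(), proj.cam.get_K())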
# adds the value to the list and returns the index
def unique_add( mylist, value ):
key = "%.5f,%.5f,%.5f" % (value[0], value[1], value[2])
if key in mylist:
return mylist[key]['index']
else:
mylist[key] = {}
mylist[key]['index'] = mylist['counter']
mylist[key]['vertex'] = value
mylist['counter'] += 1
return mylist['counter'] - 1
def gen_ac3d_object(f, name, raw_tris):
vertices = {}
vertices['counter'] = 0
tris = []
for tri in raw_tris:
for v in tri:
ned = [ v[0], v[1], v[2] ]
index = unique_add( vertices, ned )
tris.append( [ index, v[3], v[4] ] )
print "raw vertices =", len(raw_tris)*3
print "indexed vertices =", len(vertices)
# sort the dictionary into an array so we can output it in the
# correct order
vertex_list = [None] * (len(vertices) - 1) # skip counter record
for key in vertices:
if key != 'counter':
index = vertices[key]['index']
v = vertices[key]['vertex']
vertex_list[index] = v
f.write("OBJECT poly\n")
f.write("texture \"./Textures/" + name + "\"\n")
f.write("loc 0 0 0\n")
f.write("numvert %d\n" % len(vertex_list))
for i, v in enumerate(vertex_list):
if args.depth:
f.write("%.3f %.3f %.3f\n" % (v[1], v[0], -v[2]))
else:
f.write("%.3f %.3f %.3f\n" % (v[1], v[0], 0.0))
f.write("numsurf %d\n" % (len(tris) / 3))
for i in range(len(tris) / 3):
f.write("SURF 0x30\n")
f.write("mat 0\n")
f.write("refs 3\n")
t = tris[3*i]
f.write("%d %.4f %.4f\n" % (t[0], t[1], t[2]))
t = tris[3*i + 1]
f.write("%d %.4f %.4f\n" % (t[0], t[1], t[2]))
t = tris[3*i + 2]
f.write("%d %.4f %.4f\n" % (t[0], t[1], t[2]))
f.write("kids 0\n")
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
proj.load_features()
#proj.load_match_pairs()
print "Loading match points (sba)..."
matches_sba = pickle.load( open( args.project + "/matches_sba", "rb" ) )
#matches_direct = pickle.load( open( args.project + "/matches_direct", "rb" ) )
# iterate through the sba match dictionary and build a list of feature
# points and heights (in x=east,y=north,z=up coordinates)
print "Building raw mesh interpolator"
raw_points = []
raw_values = []
sum_values = 0.0
sum_count = 0
for match in matches_sba:
ned = match[0]
if not ned is None:
raw_points.append( [ned[1], ned[0]] )
raw_values.append( -ned[2] )
sum_values += -ned[2]
sum_count += 1
avg_height = sum_values / sum_count
print "Average elevation = %.1f" % ( avg_height )
tri = scipy.spatial.Delaunay(np.array(raw_points))
interp = scipy.interpolate.LinearNDInterpolator(tri, raw_values)
# compute min/max range of horizontal surface
print "Determining coverage area"
p0 = raw_points[0]
x_min = p0[0]
x_max = p0[0]
y_min = p0[1]
y_max = p0[1]
for p in raw_points:
if p[0] < x_min: x_min = p[0]
if p[0] > x_max: x_max = p[0]
if p[1] < y_min: y_min = p[1]
if p[1] > y_max: y_max = p[1]
print "Area coverage = %.1f,%.1f to %.1f,%.1f (%.1f x %.1f meters)" % \
(x_min, y_min, x_max, y_max, x_max-x_min, y_max-y_min)
# temporary
x_min = -400
x_max = 400
y_min = -400
y_max = 400
# now count how many features show up in each image
for image in proj.image_list:
image.feature_count = 0
for match in matches_sba:
if not match[0] is None:
for p in match[1:]:
image = proj.image_list[ p[0] ]
image.feature_count += 1
for image in proj.image_list:
print image.feature_count,
print
# # compute number of connections per image
# for image in proj.image_list:
# image.connections = 0
# for pairs in image.match_list:
# if len(pairs) >= 8:
# image.connections += 1
# #if image.connections > 1:
# # print "%s connections: %d" % (image.name, image.connections)
# construct grid of points for rendering and interpolate elevation
# from raw mesh
steps = args.steps
x_list = np.linspace(x_min, x_max, steps + 1)
y_list = np.linspace(y_min, y_max, steps + 1)
grid_points = []
grid_values = []
for y in y_list:
for x in x_list:
value = interp([x, y])
if value:
grid_points.append( [x, y] )
if args.ground:
grid_values.append(args.ground)
else:
grid_values.append( interp([x, y]) )
print "Building grid triangulation..."
tri = scipy.spatial.Delaunay(np.array(grid_points))
# start with empty triangle lists
# format: [ [v[0], v[1], v[2], u, v], .... ]
for image in proj.image_list:
image.tris = []
print "Points:", len(grid_points)
print "Triangles:", len(tri.simplices)
good_tris = 0
failed_tris = 0
# make sure we start with an empty projection matrix for each image
for image in proj.image_list:
image.PROJ = None
# iterate through the triangle list
bar = Bar('Generating 3d model:', max=len(tri.simplices),
suffix='%(percent).1f%% - %(eta)ds')
bar.sma_window = 50
camw, camh = proj.cam.get_image_params()
fuzz = 20.0
count = 0
update_steps = 10
for tri in tri.simplices:
# print "Triangle:", tri
# compute triangle center
sum = np.array( [0.0, 0.0, 0.0] )
for vert in tri:
#print vert
ned = [ grid_points[vert][1], grid_points[vert][0], -grid_values[vert] ]
sum += np.array( ned )
tri_center = sum / len(tri)
#print tri_center
# look for complete coverage, possibly estimating uv by
# reprojection if a feature wasn't found originally
done = False
best_image = None
best_connections = -1
best_metric = 10000.0 * 10000.0
best_triangle = []
for image in proj.image_list:
ok = True
# reject images with no connections to the set
if image.camera_pose_sba == None:
ok = False
continue
# quick 3d bounding radius rejection
dist = np.linalg.norm(image.center - tri_center)
if dist > image.radius + fuzz:
ok = False
continue
# we passed the initial proximity test
triangle = []
for vert in tri:
ned = [ grid_points[vert][1], grid_points[vert][0],
-grid_values[vert] ]
scale = float(image.width) / float(camw)
uv = compute_feature_uv(proj.cam.get_K(scale), image, ned)
dist_coeffs = proj.cam.get_dist_coeffs()
uv[0], uv[1] = redistort(uv[0], uv[1], dist_coeffs, proj.cam.get_K(scale))
uv[0] /= image.width
uv[1] /= image.height
v = list(ned)
v.append(uv[0])
v.append(1.0 - uv[1])
triangle.append(v)
if uv[0] < 0.0 or uv[0] > 1.0:
#print " fail"
ok = False
if uv[1] < 0.0 or uv[1] > 1.0:
#print " fail"
ok = False
if ok:
# print " pass!"
# compute center of triangle
dist_cam = np.linalg.norm( image.camera_pose_sba['ned'] - tri_center )
dist_img = np.linalg.norm( image.center - tri_center )
dist_cycle = image.cycle_depth
# favor the image source that is seeing this triangle
# directly downwards, but also favor the image source that
# has us closest to the center of projection
#metric = dist_cam * dist_img
#metric = dist_cam
cycle_gain = 0.02
#metric = dist_cam * (1 + dist_cycle * cycle_gain)
#metric = image.cycle_depth
#metric = image.connection_order
metric = image.connections
if metric < best_metric:
best_metric = metric
best_image = image
best_triangle = list(triangle)
if not best_image == None:
# print "Best image (hard): %d (%d)" % (best_image, best_connections)
# print " ", best_triangle
best_image.tris.append(best_triangle)
good_tris += 1
done = True
if not done:
# print "failed triangle"
failed_tris += 1
count += 1
if count % update_steps == 0:
bar.next(update_steps)
bar.finish()
print "good tris =", good_tris
print "failed tris =", failed_tris
# write out an ac3d file
name = args.project + "/sba3d.ac"
f = open( name, "w" )
f.write("AC3Db\n")
trans = 0.0
f.write("MATERIAL \"\" rgb 1 1 1 amb 0.6 0.6 0.6 emis 0 0 0 spec 0.5 0.5 0.5 shi 10 trans %.2f\n" % (trans))
f.write("OBJECT world\n")
f.write("kids " + str(len(proj.image_list)) + "\n")
for image in proj.image_list:
print image.name, len(image.tris)
gen_ac3d_object(f, image.name, image.tris)
| mit |
tdhopper/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
# Test that multiple inits does not much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError`` exception,
when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/matplotlib/tests/test_rcparams.py | 5 | 15179 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import io
import os
import warnings
from collections import OrderedDict
from cycler import cycler, Cycler
try:
from unittest import mock
except ImportError:
import mock
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.tests import assert_str_equal
from matplotlib.testing.decorators import cleanup, knownfailureif
import matplotlib.colors as mcolors
from nose.tools import assert_true, assert_raises, assert_equal
import nose
from itertools import chain
import numpy as np
from matplotlib.rcsetup import (validate_bool_maybe_none,
validate_stringlist,
validate_colorlist,
validate_bool,
validate_nseq_int,
validate_nseq_float,
validate_cycler,
validate_hatch,
validate_hist_bins)
mpl.rc('text', usetex=False)
mpl.rc('lines', linewidth=22)
fname = os.path.join(os.path.dirname(__file__), 'test_rcparams.rc')
def test_rcparams():
usetex = mpl.rcParams['text.usetex']
linewidth = mpl.rcParams['lines.linewidth']
# test context given dictionary
with mpl.rc_context(rc={'text.usetex': not usetex}):
assert mpl.rcParams['text.usetex'] == (not usetex)
assert mpl.rcParams['text.usetex'] == usetex
    # test context given filename (mpl.rc sets linewidth to 33)
with mpl.rc_context(fname=fname):
assert mpl.rcParams['lines.linewidth'] == 33
assert mpl.rcParams['lines.linewidth'] == linewidth
# test context given filename and dictionary
with mpl.rc_context(fname=fname, rc={'lines.linewidth': 44}):
assert mpl.rcParams['lines.linewidth'] == 44
assert mpl.rcParams['lines.linewidth'] == linewidth
# test rc_file
try:
mpl.rc_file(fname)
assert mpl.rcParams['lines.linewidth'] == 33
finally:
mpl.rcParams['lines.linewidth'] = linewidth
def test_RcParams_class():
rc = mpl.RcParams({'font.cursive': ['Apple Chancery',
'Textile',
'Zapf Chancery',
'cursive'],
'font.family': 'sans-serif',
'font.weight': 'normal',
'font.size': 12})
if six.PY3:
expected_repr = """
RcParams({'font.cursive': ['Apple Chancery',
'Textile',
'Zapf Chancery',
'cursive'],
'font.family': ['sans-serif'],
'font.size': 12.0,
'font.weight': 'normal'})""".lstrip()
else:
expected_repr = """
RcParams({u'font.cursive': [u'Apple Chancery',
u'Textile',
u'Zapf Chancery',
u'cursive'],
u'font.family': [u'sans-serif'],
u'font.size': 12.0,
u'font.weight': u'normal'})""".lstrip()
assert_str_equal(expected_repr, repr(rc))
if six.PY3:
expected_str = """
font.cursive: ['Apple Chancery', 'Textile', 'Zapf Chancery', 'cursive']
font.family: ['sans-serif']
font.size: 12.0
font.weight: normal""".lstrip()
else:
expected_str = """
font.cursive: [u'Apple Chancery', u'Textile', u'Zapf Chancery', u'cursive']
font.family: [u'sans-serif']
font.size: 12.0
font.weight: normal""".lstrip()
assert_str_equal(expected_str, str(rc))
# test the find_all functionality
assert ['font.cursive', 'font.size'] == sorted(rc.find_all('i[vz]').keys())
assert ['font.family'] == list(six.iterkeys(rc.find_all('family')))
def test_rcparams_update():
rc = mpl.RcParams({'figure.figsize': (3.5, 42)})
bad_dict = {'figure.figsize': (3.5, 42, 1)}
# make sure validation happens on input
with assert_raises(ValueError):
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='.*(validate)',
category=UserWarning)
rc.update(bad_dict)
def test_rcparams_init():
with assert_raises(ValueError):
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='.*(validate)',
category=UserWarning)
mpl.RcParams({'figure.figsize': (3.5, 42, 1)})
@cleanup
def test_Bug_2543():
# Test that it is possible to add all values to itself / deepcopy
# This was not possible because validate_bool_maybe_none did not
# accept None as an argument.
# https://github.com/matplotlib/matplotlib/issues/2543
# We filter warnings at this stage since a number of them are raised
# for deprecated rcparams, as they should be. We don't want these
# printed in the test suite.
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='.*(deprecated|obsolete)',
category=UserWarning)
with mpl.rc_context():
_copy = mpl.rcParams.copy()
for key in six.iterkeys(_copy):
mpl.rcParams[key] = _copy[key]
mpl.rcParams['text.dvipnghack'] = None
with mpl.rc_context():
from copy import deepcopy
_deep_copy = deepcopy(mpl.rcParams)
# real test is that this does not raise
assert_true(validate_bool_maybe_none(None) is None)
assert_true(validate_bool_maybe_none("none") is None)
_fonttype = mpl.rcParams['svg.fonttype']
assert_true(_fonttype == mpl.rcParams['svg.embed_char_paths'])
with mpl.rc_context():
mpl.rcParams['svg.embed_char_paths'] = False
assert_true(mpl.rcParams['svg.fonttype'] == "none")
with assert_raises(ValueError):
validate_bool_maybe_none("blah")
with assert_raises(ValueError):
validate_bool(None)
with assert_raises(ValueError):
with mpl.rc_context():
mpl.rcParams['svg.fonttype'] = True
@cleanup
def _legend_rcparam_helper(param_dict, target, get_func):
with mpl.rc_context(param_dict):
_, ax = plt.subplots()
ax.plot(range(3), label='test')
leg = ax.legend()
assert_equal(getattr(leg.legendPatch, get_func)(), target)
def test_legend_facecolor():
get_func = 'get_facecolor'
rcparam = 'legend.facecolor'
test_values = [({rcparam: 'r'},
mcolors.to_rgba('r')),
({rcparam: 'inherit', 'axes.facecolor': 'r'},
mcolors.to_rgba('r')),
({rcparam: 'g', 'axes.facecolor': 'r'},
mcolors.to_rgba('g'))]
for rc_dict, target in test_values:
yield _legend_rcparam_helper, rc_dict, target, get_func
def test_legend_edgecolor():
get_func = 'get_edgecolor'
rcparam = 'legend.edgecolor'
test_values = [({rcparam: 'r'},
mcolors.to_rgba('r')),
({rcparam: 'inherit', 'axes.edgecolor': 'r'},
mcolors.to_rgba('r')),
({rcparam: 'g', 'axes.facecolor': 'r'},
mcolors.to_rgba('g'))]
for rc_dict, target in test_values:
yield _legend_rcparam_helper, rc_dict, target, get_func
def test_Issue_1713():
utf32_be = os.path.join(os.path.dirname(__file__),
'test_utf32_be_rcparams.rc')
import locale
with mock.patch('locale.getpreferredencoding', return_value='UTF-32-BE'):
rc = mpl.rc_params_from_file(utf32_be, True, False)
assert rc.get('timezone') == 'UTC'
def _validation_test_helper(validator, arg, target):
res = validator(arg)
if isinstance(target, np.ndarray):
assert_true(np.all(res == target))
elif not isinstance(target, Cycler):
assert_equal(res, target)
else:
# Cyclers can't simply be asserted equal. They don't implement __eq__
assert_equal(list(res), list(target))
def _validation_fail_helper(validator, arg, exception_type):
with assert_raises(exception_type):
validator(arg)
def test_validators():
validation_tests = (
{'validator': validate_bool,
'success': chain(((_, True) for _ in
('t', 'y', 'yes', 'on', 'true', '1', 1, True)),
((_, False) for _ in
('f', 'n', 'no', 'off', 'false', '0', 0, False))),
'fail': ((_, ValueError)
for _ in ('aardvark', 2, -1, [], ))},
{'validator': validate_stringlist,
'success': (('', []),
('a,b', ['a', 'b']),
('aardvark', ['aardvark']),
('aardvark, ', ['aardvark']),
('aardvark, ,', ['aardvark']),
(['a', 'b'], ['a', 'b']),
(('a', 'b'), ['a', 'b']),
(iter(['a', 'b']), ['a', 'b']),
(np.array(['a', 'b']), ['a', 'b']),
((1, 2), ['1', '2']),
(np.array([1, 2]), ['1', '2']),
),
'fail': ((dict(), ValueError),
(1, ValueError),
)
},
{'validator': validate_nseq_int(2),
'success': ((_, [1, 2])
for _ in ('1, 2', [1.5, 2.5], [1, 2],
(1, 2), np.array((1, 2)))),
'fail': ((_, ValueError)
for _ in ('aardvark', ('a', 1),
(1, 2, 3)
))
},
{'validator': validate_nseq_float(2),
'success': ((_, [1.5, 2.5])
for _ in ('1.5, 2.5', [1.5, 2.5], [1.5, 2.5],
(1.5, 2.5), np.array((1.5, 2.5)))),
'fail': ((_, ValueError)
for _ in ('aardvark', ('a', 1),
(1, 2, 3)
))
},
{'validator': validate_cycler,
'success': (('cycler("color", "rgb")',
cycler("color", 'rgb')),
(cycler('linestyle', ['-', '--']),
cycler('linestyle', ['-', '--'])),
("""(cycler("color", ["r", "g", "b"]) +
cycler("mew", [2, 3, 5]))""",
(cycler("color", 'rgb') +
cycler("markeredgewidth", [2, 3, 5]))),
("cycler(c='rgb', lw=[1, 2, 3])",
cycler('color', 'rgb') + cycler('linewidth', [1, 2, 3])),
("cycler('c', 'rgb') * cycler('linestyle', ['-', '--'])",
(cycler('color', 'rgb') *
cycler('linestyle', ['-', '--']))),
(cycler('ls', ['-', '--']),
cycler('linestyle', ['-', '--'])),
(cycler(mew=[2, 5]),
cycler('markeredgewidth', [2, 5])),
),
# This is *so* incredibly important: validate_cycler() eval's
# an arbitrary string! I think I have it locked down enough,
# and that is what this is testing.
# TODO: Note that these tests are actually insufficient, as it may
# be that they raised errors, but still did an action prior to
# raising the exception. We should devise some additional tests
# for that...
'fail': ((4, ValueError), # Gotta be a string or Cycler object
('cycler("bleh, [])', ValueError), # syntax error
('Cycler("linewidth", [1, 2, 3])',
ValueError), # only 'cycler()' function is allowed
('1 + 2', ValueError), # doesn't produce a Cycler object
('os.system("echo Gotcha")', ValueError), # os not available
('import os', ValueError), # should not be able to import
('def badjuju(a): return a; badjuju(cycler("color", "rgb"))',
ValueError), # Should not be able to define anything
# even if it does return a cycler
('cycler("waka", [1, 2, 3])', ValueError), # not a property
('cycler(c=[1, 2, 3])', ValueError), # invalid values
("cycler(lw=['a', 'b', 'c'])", ValueError), # invalid values
(cycler('waka', [1, 3, 5]), ValueError), # not a property
(cycler('color', ['C1', 'r', 'g']), ValueError) # no CN
)
},
{'validator': validate_hatch,
'success': (('--|', '--|'), ('\\oO', '\\oO'),
('/+*/.x', '/+*/.x'), ('', '')),
'fail': (('--_', ValueError),
(8, ValueError),
('X', ValueError)),
},
{'validator': validate_colorlist,
'success': (('r,g,b', ['r', 'g', 'b']),
(['r', 'g', 'b'], ['r', 'g', 'b']),
('r, ,', ['r']),
(['', 'g', 'blue'], ['g', 'blue']),
([np.array([1, 0, 0]), np.array([0, 1, 0])],
np.array([[1, 0, 0], [0, 1, 0]])),
(np.array([[1, 0, 0], [0, 1, 0]]),
np.array([[1, 0, 0], [0, 1, 0]])),
),
'fail': (('fish', ValueError),
),
},
{'validator': validate_hist_bins,
'success': (('auto', 'auto'),
('10', 10),
('1, 2, 3', [1, 2, 3]),
([1, 2, 3], [1, 2, 3]),
(np.arange(15), np.arange(15))
),
'fail': (('aardvark', ValueError),
)
}
)
for validator_dict in validation_tests:
validator = validator_dict['validator']
for arg, target in validator_dict['success']:
yield _validation_test_helper, validator, arg, target
for arg, error_type in validator_dict['fail']:
yield _validation_fail_helper, validator, arg, error_type
def test_keymaps():
key_list = [k for k in mpl.rcParams if 'keymap' in k]
for k in key_list:
assert(isinstance(mpl.rcParams[k], list))
def test_rcparams_reset_after_fail():
# There was previously a bug that meant that if rc_context failed and
# raised an exception due to issues in the supplied rc parameters, the
# global rc parameters were left in a modified state.
with mpl.rc_context(rc={'text.usetex': False}):
assert mpl.rcParams['text.usetex'] is False
with assert_raises(KeyError):
with mpl.rc_context(rc=OrderedDict([('text.usetex', True),('test.blah', True)])):
pass
assert mpl.rcParams['text.usetex'] is False
if __name__ == '__main__':
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| apache-2.0 |
ninotoshi/tensorflow | tensorflow/examples/skflow/iris_custom_decay_dnn.py | 8 | 1737 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
from tensorflow.contrib import skflow
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
# setup exponential decay function
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
# use customized decay function in learning_rate
classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=800,
learning_rate=exp_decay)
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
| apache-2.0 |
sly-ninja/python_for_ml | Module5/assignment4.py | 1 | 10045 | import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.cluster import KMeans  # needed by doKMeans() below
import matplotlib.pyplot as plt
import matplotlib
#
# TODO: Parameters to play around with
PLOT_TYPE_TEXT = False # If you'd like to see indices
PLOT_VECTORS = True # If you'd like to see your original features in P.C.-Space
matplotlib.style.use('ggplot') # Look Pretty
c = ['red', 'green', 'blue', 'orange', 'yellow', 'brown']
def drawVectors(transformed_features, components_, columns, plt):
num_columns = len(columns)
# This function will project your *original* feature (columns)
# onto your principal component feature-space, so that you can
# visualize how "important" each one was in the
# multi-dimensional scaling
# Scale the principal components by the max value in
# the transformed set belonging to that component
xvector = components_[0] * max(transformed_features[:,0])
yvector = components_[1] * max(transformed_features[:,1])
## Visualize projections
# Sort each column by its length. These are your *original*
# columns, not the principal components.
import math
important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }
important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
print("Projected Features by importance:\n", important_features)
ax = plt.axes()
for i in range(num_columns):
# Use an arrow to project each original feature as a
# labeled vector on your principal component axes
plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75, zorder=600000)
plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75, zorder=600000)
return ax
def doPCA(data, dimensions=2):
from sklearn.decomposition import PCA
model = PCA(n_components=dimensions, svd_solver='randomized', random_state=7)
model.fit(data)
return model
def doKMeans(data, clusters=0):
#
# TODO: Do the KMeans clustering here, passing in the # of clusters parameter
# and fit it against your data. Then, return a tuple containing the cluster
# centers and the labels.
#
# Hint: Just like with doPCA above, you will have to create a variable called
# `model`, which is a SKLearn K-Means model for this to work.
#
model = KMeans(clusters)
model.fit(data)
return model.cluster_centers_, model.labels_
#
# TODO: Load up the dataset. It may or may not have nans in it. Make
# sure you catch them and destroy them, by setting them to '0'. This is valid
# for this dataset, since if the value is missing, you can assume no $ was spent
# on it.
#
df = pd.read_csv('Datasets/Wholesale_customers_data.csv')
df = df.fillna(0)
#
# TODO: As instructed, get rid of the 'Channel' and 'Region' columns, since
# you'll be investigating as if this were a single location wholesaler, rather
# than a national / international one. Leaving these fields in here would cause
# KMeans to examine and give weight to them.
#
df = df.drop(['Channel', 'Region'], axis=1)
#
# TODO: Before unitizing / standardizing / normalizing your data in preparation for
# K-Means, it's a good idea to get a quick peek at it. You can do this using the
# .describe() method, or even by using the built-in pandas df.plot.hist()
#
# .. your code here ..
#
# INFO: Having checked out your data, you may have noticed there's a pretty big gap
# between the top customers in each feature category and the rest. Some feature
# scaling algos won't get rid of outliers for you, so it's a good idea to handle that
# manually---particularly if your goal is NOT to determine the top customers. After
# all, you can do that with a simple Pandas .sort_values() and not a machine
# learning clustering algorithm. From a business perspective, you're probably more
# interested in clustering your +/- 2 standard deviation customers, rather than the
# creme de la creme, or bottom-of-the-barrel'ers
#
# Remove top 5 and bottom 5 samples for each column:
drop = {}
for col in df.columns:
# Bottom 5
sort = df.sort_values(by=col, ascending=True)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
# Top 5
sort = df.sort_values(by=col, ascending=False)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
#
# INFO Drop rows by index. We do this all at once in case there is a
# collision. This way, we don't end up dropping more rows than we have
# to, if there is a single row that satisfies the drop for multiple columns.
# Since there are 6 columns, if we end up dropping < 5*6*2 = 60 rows, that means
# there indeed were collisions.
print("Dropping {0} Outliers...".format(len(drop)))
df.drop(inplace=True, labels=drop.keys(), axis=0)
print(df.describe())
#
# INFO: What are you interested in?
#
# Depending on what you're interested in, you might take a different approach
# to normalizing/standardizing your data.
#
# You should note that all columns left in the dataset are of the same unit.
# You might ask yourself, do I even need to normalize / standardize the data?
# The answer depends on what you're trying to accomplish. For instance, although
# all the units are the same (generic money unit), the price per item in your
# store isn't. There may be some cheap items and some expensive ones. If your goal
# is to find out what items people tend to buy together but you didn't
# unitize properly before running kMeans, the contribution of the lesser priced
# item would be dwarfed by the more expensive item.
#
# For a great overview on a few of the normalization methods supported in SKLearn,
# please check out: https://stackoverflow.com/questions/30918781/right-function-for-normalizing-input-of-sklearn-svm
#
# Suffice to say, at the end of the day, you're going to have to know what question
# you want answered and what data you have available in order to select the best
# method for your purpose. Luckily, SKLearn's interfaces are easy to switch out
# so in the mean time, you can experiment with all of them and see how they alter
# your results.
#
#
# 5-sec summary before you dive deeper online:
#
# NORMALIZATION: Let's say your users spend a LOT. Normalization divides each item by
# the sample's overall amount of spending. Stated differently, your
# new feature is = the contribution of overall spending going into
# that particular item: $spent on feature / $overall spent by sample
#
# MINMAX: What % in the overall range of $spent by all users on THIS particular
# feature is the current sample's feature at? When you're dealing with
# all the same units, this will produce a near face-value amount. Be
# careful though: if you have even a single outlier, it can cause all
# your data to get squashed up in lower percentages.
# Imagine your buyers usually spend $100 on wholesale milk, but today
# only spent $20. This is the relationship you're trying to capture
# with MinMax. NOTE: MinMax doesn't standardize (std. dev.); it only
# normalizes / unitizes your feature, in the mathematical sense.
# MinMax can be used as an alternative to zero mean, unit variance scaling.
# [(sampleFeatureValue - min) / (max - min)] * (new_max - new_min) + new_min
# where min and max are the overall feature values for all samples, and
# new_min / new_max define the target output range (0 and 1 by default).
# A small illustration of the outlier caveat follows the scaler options below.
#
# TODO: Un-comment just ***ONE*** of the lines below at a time and see how it alters your results
# Pay attention to the direction of the arrows, as well as their LENGTHS
#T = preprocessing.StandardScaler().fit_transform(df)
#T = preprocessing.MinMaxScaler().fit_transform(df)
#T = preprocessing.MaxAbsScaler().fit_transform(df)
#T = preprocessing.Normalizer().fit_transform(df)
T = df # No Change (left enabled as a default so the script runs; swap in one of the scalers above to experiment)
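# Hedged illustration, not required by the assignment: how a single outlier
# squashes MinMax-scaled values. The toy array and names below (_toy,
# _toy_minmax) are made up for this example only.
_toy = np.array([[1.0], [2.0], [3.0], [100.0]])
_toy_minmax = preprocessing.MinMaxScaler().fit_transform(_toy)
# -> approximately [[0.], [0.0101], [0.0202], [1.]]: the three ordinary values
#    are compressed near zero because the single outlier defines the max.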
#
# INFO: Sometimes people perform PCA before doing KMeans, so that KMeans only
# operates on the most meaningful features. In our case, there are so few features
# that doing PCA ahead of time isn't really necessary, and you can do KMeans in
# feature space. But keep in mind you have the option to transform your data to
# bring down its dimensionality. If you take that route, then your Clusters will
# already be in PCA-transformed feature space, and you won't have to project them
# again for visualization.
# Do KMeans
n_clusters = 3
centroids, labels = doKMeans(T, n_clusters)
#
# TODO: Print out your centroids. They're currently in feature-space, which
# is good. Print them out before you transform them into PCA space for viewing
#
# .. your code here ..
# Do PCA *after* to visualize the results. Project the centroids as well as
# the samples into the new 2D feature space for visualization purposes.
display_pca = doPCA(T)
T = display_pca.transform(T)
CC = display_pca.transform(centroids)
# Visualize all the samples. Give them the color of their cluster label
fig = plt.figure()
ax = fig.add_subplot(111)
if PLOT_TYPE_TEXT:
# Plot the index of the sample, so you can further investigate it in your dset
for i in range(len(T)): ax.text(T[i,0], T[i,1], df.index[i], color=c[labels[i]], alpha=0.75, zorder=600000)
ax.set_xlim(min(T[:,0])*1.2, max(T[:,0])*1.2)
ax.set_ylim(min(T[:,1])*1.2, max(T[:,1])*1.2)
else:
# Plot a regular scatter plot
sample_colors = [ c[labels[i]] for i in range(len(T)) ]
ax.scatter(T[:, 0], T[:, 1], c=sample_colors, marker='o', alpha=0.2)
# Plot the Centroids as X's, and label them
ax.scatter(CC[:, 0], CC[:, 1], marker='x', s=169, linewidths=3, zorder=1000, c=c)
for i in range(len(centroids)): ax.text(CC[i, 0], CC[i, 1], str(i), zorder=500010, fontsize=18, color=c[i])
# Display feature vectors for investigation:
if PLOT_VECTORS: drawVectors(T, display_pca.components_, df.columns, plt)
# Add the cluster label back into the dataframe and display it:
df['label'] = pd.Series(labels, index=df.index)
# print(df)
plt.show()
| mit |
howeverforever/SuperMotor | pic_data/0830/BODY2/main.py | 12 | 4519 | # import serial
import sys
import numpy as np
from lib import Parser, PresentationModel, AnalogData
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
def real_time_process(argv):
"""
Once the model has been built, load data in real time to predict the state at the moment.
:param argv:
argv[0]: client ID
argv[1]: connect_port_name
argv[2]: K-envelope's K, 5 is the best.
:return:
"""
"""
_BT_NAME = argv[0]
_PORT_NAME = argv[1]
_K = int(argv[2])
# access file to read model features.
p_model = PresentationModel(PresentationModel.TRAINING_MODEL_FILE)
# plot parameters
analog_data = AnalogData(Parser.PAGESIZE)
print('>> Start to receive data...')
# open serial port
ser = serial.Serial(_PORT_NAME, 9600)
for _ in range(20):
ser.readline()
while True:
try:
# retrieve the line
line = ser.readline().decode()
data = [float(val) for val in line.split(',')]
# no missing column in the data
if len(data) == 3:
# calculate mean gap
analog_data.add(data)
data_list = analog_data.merge_to_list()
real_data = p_model.pca_combine(data_list)
peak_ave = Parser.find_peaks_sorted(real_data)
valley_ave = Parser.find_valley_sorted(real_data)
gap = np.mean(peak_ave) - np.mean(valley_ave)
# is "gap" in K-envelope?
state = p_model.predict(gap, _K)
print("OK" if state == 0 else "warning !!!")
# put result into the target file
fp = open(PresentationModel.TARGET_FILE, 'w')
fp.write(_BT_NAME + '\n' + str(state))
fp.close()
except KeyboardInterrupt:
print('>> exiting !')
break
except IOError:
continue
"""
def file_process(argv):
# access file to read model features.
p_model = PresentationModel(PresentationModel.TRAINING_MODEL_FILE)
analog_data = AnalogData(Parser.PAGESIZE)
print('>> Start to receive data from FILE...')
df = pd.DataFrame(columns={
'delta_t',
'K',
'recorded_time',
'false_positive_ratio'
})
EVENT = 2485
for c in [1, 5, 10, 15, 20, 25, 30, 35, 40]:
for K in [10, 15, 20, 25, 30]:
CONTINOUS_ANOMALY = c
count = 0
fp = open(argv[0], 'r')
K /= 10.0
line_number = 0
warning_count = 0
for file_line in fp:
line_number += 1
if line_number > EVENT:
break
line = file_line.split(',')
data = [float(val) for val in line[1:]]
if len(data) != 3:
continue
analog_data.add(data)
data_list = analog_data.merge_to_list()
real_data = p_model.pca_combine(data_list)
peak_ave = Parser.find_peaks_sorted(real_data)
valley_ave = Parser.find_valley_sorted(real_data)
gap = np.mean(peak_ave) - np.mean(valley_ave)
if line_number >= Parser.PAGESIZE and p_model.predict(gap, K) != 0:
count += 1
if count >= CONTINOUS_ANOMALY:
warning_count += 1
else:
count = 0
delta_t = c / 20
rec_time = argv[1]
e = df.shape[0]
df.loc[e] = {
'delta_t': delta_t,
'recorded_time': rec_time,
'false_positive_ratio': warning_count / (EVENT - Parser.PAGESIZE) * 100,
'K': K
}
print(delta_t, rec_time, warning_count / (EVENT - Parser.PAGESIZE) * 100, K)
df = df[['recorded_time', 'delta_t', 'K', 'false_positive_ratio']]
print(df)
df.to_csv(argv[0][:-4] + '_res.csv', index=False)
def main(argv):
if len(argv) == 3:
real_time_process(argv)
elif len(argv) == 2:
file_process(argv)
else:
print('Error: accepts exactly 2 (file mode) or 3 (real-time mode) parameters.')
print()
print(':param argv:')
print('argv[0]: client ID')
print('argv[1]: connect_port_name')
print('argv[2]: K-envelope\'s K, 5 is the best.')
print()
sys.exit(2)
if __name__ == '__main__':
main(sys.argv[1:])
| apache-2.0 |
jr-minnaar/bitrader | setup.py | 1 | 3850 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
"""
from codecs import open
from os import chdir, pardir, path
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# allow setup.py to be run from any path
chdir(path.normpath(path.join(path.abspath(__file__), pardir)))
setup(
name='bitrader',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
# use_scm_version={
# 'write_to': 'src/static/version.txt',
# },
version='0.12.0',
description=(
"Bitcoin Arbitrage tools"
),
long_description=long_description,
# The project's main homepage.
url='https://github.com/jr-minnaar/bitrader',
# Author details
author='JR Minnaar',
author_email='[email protected]',
# Choose your license
license='MIT',
# What does your project relate to?
keywords='bitcoin trading arbitrage',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# If setuptools_scm is installed, this automatically adds everything in version control
include_package_data=True,
zip_safe=True,
# setup_requires=['setuptools_scm'],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'telepot>=12.4',
'krakenex>=0.1.4',
'matplotlib',
'seaborn',
'pandas',
'notebook',
'lxml',
'html5lib',
'python-dotenv',
'BeautifulSoup4',
# API tools
'redis', # Make optional?
'requests>=2',
'requests-cache>=0.4.12',
'requests-futures>=0.9.7',
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': [
'wheel>=0.29.0',
'python-dotenv>=0.5.1',
],
# 'test': [
# 'coverage',
# ],
},
# test_suite='nose.collector',
# tests_require=['invoke'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'arbot=bitrader.main:main',
],
},
)
| mit |
aliutkus/commonfate | examples/cft_mpl-plot.py | 1 | 1793 | import pylab as plt
import numpy as np
import itertools
import soundfile as sf
import argparse
import seaborn as sns
import commonfate
def displaySTFT(X, name=None):
# display an STFT magnitude spectrogram (2-D: frequency x time)
sns.set_style("white")
fig, ax = plt.subplots(1, 1)
plt.figure(1)
plt.pcolormesh(
np.flipud(abs(np.squeeze(X))),
vmin=0,
vmax=10,
cmap='cubehelix_r',
)
if name is None:
plt.show()
else:
plt.savefig(name)
def displayMSTFT(Z, name=None):
# display a modulation spectrogram, of shape (w1,w2,f,t)
plt.figure(1)
(nF, nT) = Z.shape[2:4]
for (f, t) in itertools.product(range(nF), range(nT)):
plt.subplot(nF, nT, (nF-f-1) * nT+t+1)
plt.pcolormesh(
np.flipud(abs(Z[..., f, t])) ** 0.3,
vmin=0,
vmax=10,
cmap='cubehelix_r',
)
plt.xticks([])
plt.xlabel('')
plt.yticks([])
plt.ylabel('')
f = plt.gcf()
f.subplots_adjust(wspace=0, hspace=0)
if name is None:
plt.show()
else:
plt.savefig(name)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Source Separation based on Common Fate Model')
parser.add_argument('input', type=str, help='Input Audio File')
args = parser.parse_args()
filename = args.input
# loading signal
(audio, fs) = sf.read(filename, always_2d=True)
x_stft = commonfate.transform.forward(audio, 1024, 512)
x_cft = commonfate.transform.forward(
x_stft, (64, 32), (32, 16), real=False
)
print 'getting modulation spectrogram, shape:', x_cft.shape
z_cft = np.abs(x_cft)
displaySTFT(x_stft)
displayMSTFT(z_cft[..., :, :, 0])
| bsd-3-clause |
boland1992/SeisSuite | build/lib/seissuite/ant/psmcsampling.py | 8 | 12145 | """
Markov chain Monte-Carlo sampling of the parameter space
"""
import matplotlib.pyplot as plt
import numpy as np
np.random.seed()
EPS = 1E-6
# module variable containing samples from U(0, 1)
SIZE_SAMPLES_UNIFORM = int(1E6)
samples_uniform = np.random.uniform(size=SIZE_SAMPLES_UNIFORM)
isample_uniform = 0
class Parameter:
"""
Class holding a model's parameter to be sampled uniformly
along values from *minval* to *maxval* every *step*,
using a Markov chain (random walk)
"""
def __init__(self, name, minval, maxval, step, startval, maxjumpsize, nmaxsample,
frozen=False):
"""
Initialization of parameter space, parameters of random walk
and array of samples
"""
self.name = name
self.frozen = frozen # if frozen, the value never changes
# parameter space and random walk parameters
self.values = np.arange(minval, maxval + step, step)
self._nvalue = len(self.values)
self._minval = minval
self._maxval = maxval
self._step = step
self._maxjumpsize = maxjumpsize
self._startval = startval
self._neighborhoods = []
for value in self.values:
# neighborhood = all (different) values separated by up to *maxjumpsize*
neighborhood = [i for i in range(self._nvalue)
if 0 < abs(value - self.values[i]) <= maxjumpsize]
self._neighborhoods.append(neighborhood)
# parameter's current index
i = np.argmin(np.abs(self.values - self._startval))
if np.abs(self.values[i] - self._startval) > step:
raise Exception('Starting value out of range')
self._currentindex = i
# initializing proposed next index
self._proposednextindex = None
# parameter's samples
self.samples = np.zeros(nmaxsample)
self.nsample = 0
def reinit(self):
"""
Reinitializes the parameter to its initial state
"""
# parameter's current index back to start value
self._currentindex = np.argmin(np.abs(self.values - self._startval))
# reinitializing proposed next index
self._proposednextindex = None
# reinitializing parameter's samples
self.samples[...] = 0.0
self.nsample = 0
def __repr__(self):
s = '(ModelParameter)<{} randomly sampled between {}-{}>'
return s.format(self.name, self._minval, self._maxval)
def __add__(self, other):
"""
Adds two parameter
@type other: Parameter
"""
if other == 0:
# 0 + self = self (to allow sum([parameter1, parameter2...])
return self
if abs(self._step - other._step) > EPS:
s = "Warning: parameters {} and {} have different sampling steps"
print s.format(self, other)
if abs(self._maxjumpsize - other._maxjumpsize) > EPS:
s = "Warning: parameters {} and {} have different max jump size"
print s.format(self, other)
if self.nsample != other.nsample:
raise Exception("Parameters must have the same nb of samples")
m = Parameter(name=u"{} + {}".format(self.name, other.name),
minval=self._minval + other._minval,
maxval=self._maxval + other._maxval,
step=max(self._step, other._step),
maxjumpsize=max(self._maxjumpsize, other._maxjumpsize),
startval=self.current() + other.current(),
nmaxsample=max(np.size(self.samples), np.size(other.samples)))
# filling existing samples
m.nsample = self.nsample
m.samples[:m.nsample] = self.samples[:m.nsample] + other.samples[:m.nsample]
return m
def __radd__(self, other):
return self + other
def current(self):
"""
Current value
"""
return self.values[self._currentindex]
def next(self):
"""
Next proposed value
"""
if self._proposednextindex is None:
raise Exception("No next value proposed yet.")
return self.values[self._proposednextindex]
def propose_next(self):
"""
Proposing next value, using a random walk that samples
uniformly the parameter space
"""
if self._nvalue > 1 and not self.frozen:
self._proposednextindex = random_walk_nextindex(
self._currentindex, self._nvalue, neighborhoods=self._neighborhoods)
else:
self._proposednextindex = self._currentindex
return self.values[self._proposednextindex]
def accept_move(self):
"""
Moving to proposed next value
"""
if self._proposednextindex is None:
raise Exception("No next value proposed yet.")
self._currentindex = self._proposednextindex
self._proposednextindex = None
def addsample(self):
"""
Adding current parameter value to samples
"""
if self.nsample >= len(self.samples):
raise Exception("Max number of samples reached")
self.samples[self.nsample] = self.current()
self.nsample += 1
def hist(self, ax=None, nburnt=0, xlabel=None):
"""
Plotting histogram of samples value or samples increment
"""
# creating figure if not given as input
fig = None
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
# histogram of value of samples
bins = np.arange(self._minval - 0.5 * self._step,
self._maxval + 1.5 * self._step,
self._step)
samples = self.samples[nburnt:self.nsample]
ax.hist(samples, bins=bins, normed=True, label='sampled distribution')
# prior (uniform) distribution
if self._maxval > self._minval:
x = 2 * [self._minval] + 2 * [self._maxval]
y = [0.0] + 2 * [1.0 / (self._maxval - self._minval)] + [0.0]
ax.plot(x, y, '-', lw=2, color='grey', label='prior distribution')
# legend, labels and title
ax.legend(loc='upper right', fontsize=10, framealpha=0.8)
ax.set_xlabel(self.name if not xlabel else xlabel)
ax.set_ylabel('Probability density')
ax.set_title('Nb of samples: {}'.format(len(samples)))
ax.grid(True)
# statistics
s = "Mean & std dev:\n{:.3G} +/- {:.3G}".format(np.mean(samples),
np.std(samples))
quantiles = np.percentile(samples, [2.5, 97.5])
s += "\n95% confidence interval:\n{:.3G}, {:.3G}".format(*quantiles)
ax.text(min(ax.get_xlim()), max(ax.get_ylim()), s,
fontdict={'fontsize': 10},
horizontalalignment='left',
verticalalignment='top',
bbox={'color': 'w', 'alpha': 0.8})
if fig:
fig.show()
def plot(self):
plt.plot(self.samples[:self.nsample], '-')
plt.xlabel('Sample number')
plt.ylabel(self.name)
plt.show()
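# Illustrative sketch, not part of the original module: a minimal sampling
# loop driving a single Parameter with the Metropolis acceptance rule defined
# below, assuming a quadratic misfit centred on 2.0. All names here are
# hypothetical.
def _example_parameter_sampling(nsample=1000):
    param = Parameter(name='velocity', minval=0.0, maxval=5.0, step=0.1,
                      startval=2.5, maxjumpsize=0.3, nmaxsample=nsample)
    misfit_current = (param.current() - 2.0) ** 2
    for _ in range(nsample):
        # propose a jump and evaluate its misfit
        proposed = param.propose_next()
        misfit_next = (proposed - 2.0) ** 2
        # accept_move() here is the module-level Metropolis rule defined below
        if accept_move(misfit_current, np.exp(-misfit_current), misfit_next):
            param.accept_move()
            misfit_current = misfit_next
        param.addsample()
    return param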
def accept_move(misfit_current, likelihood_current, misfit_proposednext):
"""
Is move accepted? Yes with probability P = L_next / L_current,
with L = likelihood
@rtype: bool
"""
if misfit_proposednext <= misfit_current:
return True
# gaussian likelihood
P = np.exp(-misfit_proposednext) / likelihood_current
return True if sample_uniform() < P else False
def random_walk_nextindex(currentindex, npoints, maxstepsize=1, neighborhoods=None):
"""
Next index of a random walk that samples uniformly all
indexes between 0 and npoints - 1.
Neighbors are either points sperated by up to *maxstepsize*
from current index, or are given by *neighborhoods[currentindex]*.
Next index is chosen equiprobably amongst neighbors.
If the proposed next index has less (or equal) neighbors than
the current index, the move is always accepted. Else, the
move is accepted with probability:
P = n_neighbors_current / n_neighbors_next
@type currentindex: int
@type npoints: int
@type maxstepsize: int
@type neighborhoods: list of (list of int)
@rtype: int
"""
# neighborhood of current point...
if neighborhoods:
# ... given as input
ineighbours = neighborhoods[currentindex]
else:
# ... all points of the grid separated by up to
# *maxstepsize* from current point
ineighbours = neighborhood(currentindex, npoints, maxdist=maxstepsize)
# proposed move, chosen equiprobably amongst neighbours
u = sample_uniform()
nextindex = ineighbours[int(u * len(ineighbours))]
# nb of neighbours of proposed point
if neighborhoods:
nnextneighbours = len(neighborhoods[nextindex])
else:
dist2edge = min(nextindex, npoints - 1 - nextindex)
nnextneighbours = maxstepsize + min(maxstepsize, dist2edge)
# the move is accepted with probability
# P = min(1, nb current neighbours / nb next neighbours)
P = float(len(ineighbours)) / float(nnextneighbours)
return nextindex if (P >= 1 or sample_uniform() < P) else currentindex
def random_walk(start, grid, nstep=np.Infinity, maxstepsize=1, likelihood=None):
"""
[Metropolis] random walk with jumps of up to *maxstepsize* that:
- samples uniformly all values of *grid* if no *likelihood*
function is given (all moves are accepted)
- accepts the moves with probability L_new / L_current,
(where L is the likelihood) if a *likelihood* function is
given, thus sampling k.U(x).L(x)
Returns an iterator of length *nstep*, or an infinite
iterator if nstep = np.Infinity (the default).
@type start: float
@type grid: L{numpy.ndarray}
@type nstep: int
@type maxstepsize: int
@type likelihood: function
"""
if not min(grid) <= start <= max(grid):
raise Exception("Starting point not within grid limits")
# preparing list of neighborhoods
neighborhoods = []
npoints = len(grid)
for i in range(npoints):
neighborhoods.append(neighborhood(i, npoints, maxdist=maxstepsize))
# starting index and step nb
currentindex = np.abs(grid - start).argmin()
step = 0
while step < nstep:
# yielding current value
yield grid[currentindex]
# likelihood of current point (1 if no likelihood func given)
L_current = likelihood(grid[currentindex]) if likelihood else 1.0
# proposed move
nextindex = random_walk_nextindex(currentindex, npoints,
maxstepsize=maxstepsize,
neighborhoods=neighborhoods)
# probability to accept move (always accepted is no likelihood func given)
L_new = likelihood(grid[nextindex]) if likelihood else L_current + 1.0
P = L_new / L_current
currentindex = nextindex if P >= 1.0 or sample_uniform() <= P else currentindex
step += 1
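# Illustrative usage sketch, not part of the original module: draw a few
# thousand samples from a Gaussian-shaped likelihood over a regular grid.
# The grid bounds, step and sample count below are made up for the example;
# a histogram of the returned samples should approximate the Gaussian shape.
def _example_random_walk(nstep=5000):
    grid = np.arange(-5.0, 5.0 + 0.1, 0.1)
    def gaussian(x):
        return np.exp(-0.5 * x ** 2)
    return list(random_walk(start=0.0, grid=grid, nstep=nstep,
                            maxstepsize=2, likelihood=gaussian))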
def sample_uniform():
"""
Returns a single sample of the uniform distribution
U(0, 1), from samples drawn and stored in a global
variable.
"""
global samples_uniform, isample_uniform
# sample of U(0, 1)
u = samples_uniform[isample_uniform]
# moving to next index of samples global array
isample_uniform += 1
if isample_uniform >= len(samples_uniform):
# exhausted all samples -> re-drawing samples from U(0, 1)
samples_uniform = np.random.uniform(size=SIZE_SAMPLES_UNIFORM)
isample_uniform = 0
return u
def neighborhood(index, npoints, maxdist=1):
"""
Returns the neighbourhood of the current index,
= all points of the grid separated by up to
*maxdist* from current point.
@type index: int
@type npoints: int
@type maxdist int
@rtype: list of int
"""
return [index + i for i in range(-maxdist, maxdist + 1)
if i != 0 and 0 <= index + i <= npoints - 1] | gpl-3.0 |
michigraber/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
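# Illustrative sketch, not part of scikit-learn itself: with two neighbors
# placed symmetrically around the query point, the barycenter weights come out
# as ~0.5 each (up to the small regularization term). The toy arrays below are
# made up for the example.
def _example_barycenter_weights():
    X = np.array([[0.5, 0.0]])                # one query point
    Z = np.array([[[0.0, 0.0], [1.0, 0.0]]])  # its two neighbors
    return barycenter_weights(X, Z)           # ~ array([[0.5, 0.5]])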
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
M.flat[::M.shape[0] + 1] += 1 # add the identity: M = (W - I)' (W - I)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of the scaling performed by this method, using it together with
methods that are not scale-invariant (like SVMs) is discouraged.
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
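# --- Illustrative usage sketch (added; not part of the estimator above) ---
# A minimal example, assuming only the public API defined above: embed a
# synthetic swiss roll into two dimensions. The parameter values are arbitrary
# demonstration choices, not tuned defaults.
if __name__ == "__main__":
    from sklearn.datasets import make_swiss_roll

    X_demo, _ = make_swiss_roll(n_samples=500, random_state=0)
    lle = LocallyLinearEmbedding(n_neighbors=12, n_components=2,
                                 method='standard', random_state=0)
    X_demo_embedded = lle.fit_transform(X_demo)  # shape (n_samples, n_components)
    print("reconstruction error: %g" % lle.reconstruction_error_)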
| bsd-3-clause |
jorik041/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 250 | 2233 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a common technique
is to repeat the classification procedure after randomly permuting
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
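# A small added sketch (not in the original example) of how the p-value relates to
# the permutation scores: it is the fraction of permutations whose score reaches
# the unpermuted score, with a +1 correction so that the p-value is never zero.
# This mirrors the definition used by permutation_test_score.
empirical_pvalue = (np.sum(permutation_scores >= score) + 1.0) / (len(permutation_scores) + 1)
print("p-value recomputed from the permutation scores: %s" % empirical_pvalue)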
| bsd-3-clause |
rajat1994/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
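# --- Illustrative sketch (added; not one of the original tests) ---
# The barycenter weights that LLE builds on can be computed directly: for a point
# x with neighbors Z (one neighbor per row), minimize ||x - sum_j w_j Z_j||^2
# subject to sum(w) = 1. Solving the regularized local Gram system below captures
# the idea behind barycenter_kneighbors_graph; it is a simplified sketch, not the
# library implementation.
def _barycenter_weights_sketch(x, Z, reg=1e-3):
    G = Z - x                                    # center the neighbors on x
    C = np.dot(G, G.T)                           # local Gram matrix, shape (k, k)
    C = C + reg * np.trace(C) * np.eye(len(Z))   # regularize for numerical stability
    w = linalg.solve(C, np.ones(len(Z)))
    return w / w.sum()                           # enforce the sum-to-one constraint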
| bsd-3-clause |
seberg/numpy | doc/source/reference/random/performance.py | 6 | 2557 | from timeit import repeat
import pandas as pd
import numpy as np
from numpy.random import MT19937, PCG64, PCG64DXSM, Philox, SFC64
PRNGS = [MT19937, PCG64, PCG64DXSM, Philox, SFC64]
funcs = {}
integers = 'integers(0, 2**{bits},size=1000000, dtype="uint{bits}")'
funcs['32-bit Unsigned Ints'] = integers.format(bits=32)
funcs['64-bit Unsigned Ints'] = integers.format(bits=64)
funcs['Uniforms'] = 'random(size=1000000)'
funcs['Normals'] = 'standard_normal(size=1000000)'
funcs['Exponentials'] = 'standard_exponential(size=1000000)'
funcs['Gammas'] = 'standard_gamma(3.0,size=1000000)'
funcs['Binomials'] = 'binomial(9, .1, size=1000000)'
funcs['Laplaces'] = 'laplace(size=1000000)'
funcs['Poissons'] = 'poisson(3.0, size=1000000)'
setup = """
from numpy.random import {prng}, Generator
rg = Generator({prng}())
"""
test = "rg.{func}"
table = {}
for prng in PRNGS:
print(prng)
col = {}
for key in funcs:
t = repeat(test.format(func=funcs[key]),
setup.format(prng=prng().__class__.__name__),
number=1, repeat=3)
col[key] = 1000 * min(t)
col = pd.Series(col)
table[prng().__class__.__name__] = col
npfuncs = {}
npfuncs.update(funcs)
npfuncs['32-bit Unsigned Ints'] = 'randint(2**32,dtype="uint32",size=1000000)'
npfuncs['64-bit Unsigned Ints'] = 'randint(2**64,dtype="uint64",size=1000000)'
setup = """
from numpy.random import RandomState
rg = RandomState()
"""
col = {}
for key in npfuncs:
t = repeat(test.format(func=npfuncs[key]),
setup.format(prng=prng().__class__.__name__),
number=1, repeat=3)
col[key] = 1000 * min(t)
table['RandomState'] = pd.Series(col)
columns = ['MT19937', 'PCG64', 'PCG64DXSM', 'Philox', 'SFC64', 'RandomState']
table = pd.DataFrame(table)
order = np.log(table).mean().sort_values().index
table = table.T
table = table.reindex(columns)
table = table.T
table = table.reindex([k for k in funcs], axis=0)
print(table.to_csv(float_format='%0.1f'))
rel = table.loc[:, ['RandomState']].values @ np.ones(
(1, table.shape[1])) / table
rel.pop('RandomState')
rel = rel.T
rel['Overall'] = np.exp(np.log(rel).mean(1))
rel *= 100
rel = np.round(rel)
rel = rel.T
print(rel.to_csv(float_format='%0d'))
# Cross-platform table
rows = ['32-bit Unsigned Ints','64-bit Unsigned Ints','Uniforms','Normals','Exponentials']
xplat = rel.reindex(rows, axis=0)
xplat = 100 * (xplat / xplat.MT19937.values[:,None])
overall = np.exp(np.log(xplat).mean(0))
xplat = xplat.T.copy()
xplat['Overall']=overall
print(xplat.T.round(1))
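# Added note (not part of the original benchmark): the relative table printed above
# divides the RandomState timing by each generator's timing, so values above 100
# mean that generator is faster than the legacy RandomState. Under that assumption,
# the same numbers can be reproduced with plain broadcasting on the timings table:
#   rel_check = table['RandomState'].values[:, None] / table.values * 100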
| bsd-3-clause |
arkatebi/DynamicalSystems | toggleSwitch/tSwitch-det-pSet-6.py | 1 | 9567 | #/usr/bin/env python
import auxiliary_functions as aux
import PyDSTool as dst
from PyDSTool import common as cmn
import numpy as np
from matplotlib import pyplot as plt
import sys
#------------------------------------------------------------------------------#
def defineSystem():
'''
Create an object that defines the desired ODE system.
'''
# Create an object of args class from common module
DSargs = cmn.args(name='Toggle switch of two genes X and Y')
# Set the parameters:
DSargs.pars = aux.parameter_set_6()
# Set the variables:
DSargs.varspecs = aux.equations()
# Set the auxiliary functions:
DSargs.fnspecs = aux.functions()
# Set initial conditions:
DSargs.ics = {'X': 10, 'Y': 10}
DSargs.xdomain = {'X': [0, 1.0e+4], 'Y':[0, 1.0e+4]}
# Set the range of integration:
DSargs.tdomain = [0,100]
return DSargs
#------------------------------------------------------------------------------#
def t_dynamics_X(pts):
# PyPlot commands
plt.plot(pts['t'], pts['X'])
plt.xlabel('t') # Axes labels
plt.ylabel('X') # ...
#plt.xlim([0,7000])
plt.ylim([0,200]) # Range of the y axis
plt.title(ode.name) # Figure title from model name
plt.show()
plt.figure()
#------------------------------------------------------------------------------#
def t_dynamics_Y(pts):
# PyPlot commands
plt.plot(pts['t'], pts['Y'])
plt.xlabel('t') # Axes labels
plt.ylabel('Y') # ...
#plt.xlim([0,7000])
plt.ylim([0,200]) # Range of the y axis
plt.title(ode.name) # Figure title from model name
plt.show()
plt.figure()
#------------------------------------------------------------------------------#
def t_dynamics_XY(pts):
# PyPlot commands
plt.plot(pts['X'], pts['Y'])
plt.xlabel('X') # Axes labels
plt.ylabel('Y') # ...
#plt.xlim([0,7000])
plt.ylim([0,800]) # Range of the y axis
plt.title(ode.name) # Figure title from model name
plt.show()
#------------------------------------------------------------------------------#
def t_dynamics_multi_ICs_X(ode):
plt.ylim([0,200])
plt.hold(True) # Sequences of plot commands will not clear existing figures
for i, x0 in enumerate(np.linspace(-20,10,30)):
ode.set(ics = { 'X': x0 } ) # Initial condition
# Trajectories are called pol0, pol1, ...
# sample them on the fly to create Pointset tmp
tmp = ode.compute('pol%3i' % i).sample()
plt.plot(tmp['t'], tmp['X'])
plt.xlabel('time')
plt.ylabel('X')
plt.title(ode.name + ' multi ICs')
plt.show()
#------------------------------------------------------------------------------#
def t_dynamics_multi_ICs_Y(ode):
plt.ylim([0,200])
plt.hold(True) # Sequences of plot commands will not clear existing figures
for i, y0 in enumerate(np.linspace(-20,10,30)):
ode.set(ics = { 'Y': y0 } ) # Initial condition
# Trajectories are called pol0, pol1, ...
# sample them on the fly to create Pointset tmp
tmp = ode.compute('pol%3i' % i).sample()
plt.plot(tmp['t'], tmp['Y'])
plt.xlabel('time')
plt.ylabel('Y')
plt.title(ode.name + ' multi ICs')
plt.show()
#------------------------------------------------------------------------------#
def t_dynamics_multi_ICs_X(ode):
plt.figure()
plt.ylim([0,900])
plt.hold(True) # Sequences of plot commands will not clear existing figures
for i, x0 in enumerate(np.linspace(-20,10,30)):
ode.set(ics = { 'X': x0 } ) # Initial condition
# Trajectories are called pol0, pol1, ...
# sample them on the fly to create Pointset tmp
tmp = ode.compute('pol%3i' % i).sample()
plt.plot(tmp['t'], tmp['X'])
plt.xlabel('time')
plt.ylabel('X')
plt.title(ode.name + ' multi ICs X')
plt.show()
#------------------------------------------------------------------------------#
def t_dynamics_multi_ICs_Y(ode):
plt.figure()
plt.ylim([0,900])
plt.hold(True) # Sequences of plot commands will not clear existing figures
for i, y0 in enumerate(np.linspace(-20,10,30)):
ode.set(ics = { 'Y': y0 } ) # Initial condition
# Trajectories are called pol0, pol1, ...
# sample them on the fly to create Pointset tmp
tmp = ode.compute('pol%3i' % i).sample()
plt.plot(tmp['t'], tmp['Y'])
plt.xlabel('time')
plt.ylabel('Y')
plt.title(ode.name + ' multi ICs Y')
plt.show()
#------------------------------------------------------------------------------#
def t_dynamics_multi_ICs_XY(ode):
plt.figure()
plt.ylim([0,900])
# Sequences of plot commands will not clear existing figures:
plt.hold(True)
for i, x0 in enumerate(np.linspace(1,1000,4)):
for i, y0 in enumerate(np.linspace(1,1000,4)):
# Reset the initial conditions in the Vode_ODEsystem object ode:
ode.set(ics = { 'X': x0, 'Y': y0 } )
# Trajectories are called pol0, pol1, ...
# Sample them on the fly to create tmp, a Pointset object:
tmp = ode.compute('pol%3i' % i).sample()
plt.plot(tmp['X'], tmp['Y'])
plt.xlabel('X')
plt.ylabel('Y')
#plt.title(ode.name + ': multi ICs for both')
plt.show()
#plt.savefig('./figures/parSet-1_tdynamics.pdf')
#------------------------------------------------------------------------------#
def getBifDiagrams(ode):
freepar='gX'
fp=aux.fast_fixedpoint(ode)
print(fp.values())
aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
LocBifPoints=['LP','B'], bif_startpoint=50,
maxstep=1e+1, minstep=0.01, step=0.1,
silence=True, fs=[4,4], ics=[fp],
xlim=[0,200], ylim=[0,700], fontsize=10)
freepar='gY'
fp=aux.fast_fixedpoint(ode)
aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
LocBifPoints=['LP','B'], bif_startpoint=50,
maxstep=1e+1, minstep=1e-2, step=1e-1,
silence=True, fs=[4,4], ics=[fp],
xlim=[0,200], ylim=[0,700], fontsize=10)
sys.exit(0)
freepar='kX'
fp=aux.fast_fixedpoint(ode)
print(fp.values())
aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
LocBifPoints=['LP','B'], bif_startpoint=0.1,
maxstep=1e+1, minstep=0.01, step=0.1,
silence=True, fs=[4,4], ics=[fp],
xlim=[0,1], ylim=[0,700], fontsize=10)
freepar='kY'
fp=aux.fast_fixedpoint(ode)
print(fp.values())
aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
LocBifPoints=['LP','B'], bif_startpoint=0.1,
maxstep=1e+1, minstep=0.01, step=0.1,
silence=True, fs=[4,4], ics=[fp],
xlim=[0,1], ylim=[0,700], fontsize=10)
sys.exit(0)
freepar='lX'
fp=aux.fast_fixedpoint(ode)
print(fp.values())
aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
LocBifPoints=['LP','B'], bif_startpoint=0.1,
maxstep=1e+1, minstep=0.01, step=0.1,
silence=True, fs=[4,4], ics=[fp],
xlim=[0,1], ylim=[0,700], fontsize=10)
freepar='lY'
fp=aux.fast_fixedpoint(ode)
print(fp.values())
aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
LocBifPoints=['LP','B'], bif_startpoint=0.1,
maxstep=1e+1, minstep=0.01, step=0.1,
silence=True, fs=[4,4], ics=[fp],
xlim=[0,1], ylim=[0,700], fontsize=10)
sys.exit(0)
#------------------------------------------------------------------------------#
def getNullClines(DSargs, ode):
from PyDSTool.Toolbox import phaseplane as pp
vlim = {'X': [1, 700], 'Y': [1, 700]}
fp = aux.eliminate_redundants(pp.find_fixedpoints(ode, n=2, maxsearch=1e+4,
eps=1e-12),
4)
stab = aux.stability(fp, ode)
for i in range(len(fp)):
print(stab[i], fp[i])
nfp=0
aux.nullclines(['X','Y'], DSargs, stab, fp, nfp=nfp, vlim=vlim,
maxpoints=[800,800],
xticks=[0, 100, 200, 300, 400, 500, 600, 700],
yticks=[0, 100, 200, 300, 400, 500, 600, 700],
step=0.01, minstep=0.001, maxstep=10, fs=[3,3],
fontsize=8, silence=False)
#------------------------------------------------------------------------------#
if __name__ == '__main__':
DSargs = defineSystem()
# Obtain a Vode_ODEsystem object:
# (similar to VODE from SciPy)
ode = dst.Generator.Vode_ODEsystem(DSargs)
# Obtain a Trajectory object (integrate ODE):
traj = ode.compute('polarization')
# Collect data points as a Pointset object:
pts = traj.sample(dt=0.01)
#t_dynamics_X(pts)
#t_dynamics_Y(pts)
#t_dynamics_XY(pts)
#t_dynamics_multi_ICs_X(ode)
#t_dynamics_multi_ICs_Y(ode)
#t_dynamics_multi_ICs_XY(ode)
#getBifDiagrams(ode)
getNullClines(DSargs, ode)
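# --- Illustrative sketch (added; not part of the original analysis) ---
# A minimal example of re-running the integration after changing a model
# parameter, reusing only PyDSTool calls that already appear above. 'gX' is
# assumed to be one of the parameter names defined in aux.parameter_set_6()
# (it is used as a free parameter in getBifDiagrams), so treat this as a
# sketch rather than a guaranteed-to-run snippet.
#ode.set(pars={'gX': 60}, ics={'X': 10, 'Y': 10})
#traj_perturbed = ode.compute('perturbed')
#pts_perturbed = traj_perturbed.sample(dt=0.01)
#plt.plot(pts_perturbed['t'], pts_perturbed['X']); plt.show()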
| gpl-3.0 |
nelson-liu/scikit-learn | sklearn/linear_model/tests/test_huber.py | 54 | 7619 | # Authors: Manoj Kumar [email protected]
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_false
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features,
random_state=0, noise=0.05)
# Replace 10% of the sample with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
# Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression(fit_intercept=True)
lr.fit(X, y)
huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
# Test sample_weights implementation in HuberRegressor
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True)
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
# Rescale coefs before comparing with assert_array_almost_equal to make sure
# that the number of decimal places used is somewhat insensitive to the
# amplitude of the coefficients and therefore to the scale of the data
# and the regularization parameter
scale = max(np.mean(np.abs(huber.coef_)),
np.mean(np.abs(huber.intercept_)))
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
sample_weight = np.ones(X.shape[0])
sample_weight[1] = 3
sample_weight[3] = 2
huber.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True)
huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
assert_array_almost_equal(huber_sparse.coef_ / scale,
huber_coef / scale)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
def test_huber_scaling_invariant():
"""Test that outliers filtering is scaling independent."""
rng = np.random.RandomState(0)
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert_false(np.all(n_outliers_mask_1))
huber.fit(X, 2. * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2. * X, 2. * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
"""Test they should converge to same coefficients for same parameters"""
X, y = make_regression_with_outliers(n_samples=10, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0, loss="huber", shuffle=True, random_state=0, n_iter=10000,
fit_intercept=False, epsilon=1.35)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(
fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost the same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
# No n_iter_ in old SciPy (<=0.9)
if huber_warm.n_iter_ is not None:
assert_equal(0, huber_warm.n_iter_)
def test_huber_better_r2_score():
# Test that HuberRegressor gives a better r2 score than Ridge on the non-outliers
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert_greater(huber_score, ridge_score)
# The huber model should also fit poorly on the outliers.
assert_greater(ridge_outlier_score, huber_outlier_score)
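# --- Illustrative sketch (added; not one of the original tests) ---
# The classic Huber loss that motivates these tests: quadratic for small residuals
# and linear beyond a threshold, so large outliers contribute far less than under a
# squared loss. HuberRegressor's actual objective additionally estimates a scale
# parameter jointly with the coefficients, so this is only the core idea, not the
# exact loss implemented in sklearn.linear_model.huber.
def _huber_loss_sketch(residuals, delta=1.35):
    residuals = np.asarray(residuals, dtype=float)
    is_small = np.abs(residuals) <= delta
    quadratic = 0.5 * residuals ** 2
    linear = delta * (np.abs(residuals) - 0.5 * delta)
    return np.where(is_small, quadratic, linear)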
| bsd-3-clause |
ajrichards/notebook | visualization/make-poisson-with-outlier.py | 2 | 1403 | #!/usr/bin/env python
"""
Show what outliers are, using a Poisson distribution.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
plt.style.use('seaborn')
SMALL_SIZE = 18
MEDIUM_SIZE = 20
LARGE_SIZE = 22
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=LARGE_SIZE) # fontsize of the figure title
## declare the figure
fig = plt.figure(figsize=(15,8))
ax = fig.add_subplot(111)
a = np.arange(15)
poi = stats.poisson
lambda_ = [2.0, 5.0]
colours = ["#1E88E5", "#F4511E"]
plt.bar(a, poi.pmf(a, lambda_[0]), color=colours[0],
label="$\lambda = %.1f$" % lambda_[0], alpha=0.60,
edgecolor=colours[0], lw="3")
plt.bar([12.0], [0.05] , color=colours[1],
label="$\lambda = %.1f$" % lambda_[1], alpha=0.60,
edgecolor=colours[1], lw="3")
plt.xticks(a + 0.4, a)
#plt.legend()
plt.ylabel("Probability of $k$")
plt.xlabel("$k$")
#plt.title("Probability mass function of a Poisson");
plt.savefig("poisson-with-outliers.png", dpi=600)
plt.show()
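# A small added sanity check (not part of the original figure script): the bars
# above follow the Poisson pmf P(k; lam) = lam**k * exp(-lam) / k!, which can be
# compared against scipy's implementation for a single value.
from math import exp, factorial
k_check, lam_check = 3, lambda_[0]
manual_pmf = lam_check ** k_check * exp(-lam_check) / factorial(k_check)
print("manual pmf: %.6f, scipy pmf: %.6f" % (manual_pmf, poi.pmf(k_check, lam_check)))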
| bsd-3-clause |
mfjb/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
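# A tiny added illustration (not part of the original example) of the sparsity
# measure used above: the percentage of exactly-zero entries in a coefficient
# vector, computed here on a hypothetical toy vector.
demo_coef = np.array([0.0, 0.5, 0.0, -1.2, 0.0])
print("sparsity of demo_coef: %.1f%%" % (np.mean(demo_coef == 0) * 100))  # 60.0%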
| bsd-3-clause |
abonneton/TimeSeriesDecisionTrees | tools/generate_increasing_decreasing_data.py | 1 | 2032 | from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import sys
data_directory = sys.argv[1]
plot_directory = sys.argv[2]
num_instances = 100
noise = 0.3
h = 10
vector_size = 100
def generateStraightLineCoefficients():
a = abs(np.random.normal(0, 0.1, 1)[0])
a = a * 64 * h / vector_size
eps = np.random.normal(0, 1, 1)[0] * noise
b = h - a * vector_size/2 * (1 + eps)
return (a,b)
class Increase:
def __init__(self):
(self.a, self.b) = generateStraightLineCoefficients()
def value(self, t):
eps = np.random.normal(0, 1, 1)[0] * noise
return (self.a*t + self.b) * (1 + eps)
class Decrease:
def __init__(self):
(self.a, self.b) = generateStraightLineCoefficients()
def value(self, t):
eps = np.random.normal(0, 1, 1)[0] * noise
return -(self.a*t + self.b) * (1 + eps)
class Flat:
def __init__(self):
eps = np.random.normal(0, 1, 1)[0] * noise
self.b = h * (1 + eps)
def value(self, t):
eps = np.random.normal(0, 1, 1)[0] * noise
return self.b * (1 + eps)
def toCSV(array):
return ",".join(map(lambda x: str(x), array))
def generate_data(data_generator, label, label_0_1, num_instances,
noise, color, f):
x = range(vector_size)
for j in range(num_instances):
generator = data_generator()
y = range(vector_size)
for i in range(vector_size):
y[i] = generator.value(x[i])
plt.plot(x, y, color = color)
filename = label + '-' + str(j) + '-' + str(noise)
full_filename = plot_directory + filename + ".pdf"
print >>f, filename + "," + str(label_0_1) + "," + toCSV(y)
plt.savefig(full_filename)
plt.clf()
f = open(data_directory + 'increase-decrease.txt', 'w')
print >>f, str(vector_size)
generate_data(Increase, "increase", 0, num_instances, noise, "green", f)
generate_data(Decrease, "decrease", 1, num_instances, noise, "red", f)
f.close()
| gpl-3.0 |
arahuja/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
ningchi/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The outputs of the 3 models are combined in a 2D graph where the nodes
represent the stocks and the edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lookheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
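# Added note: the precision (inverse covariance) matrix is rescaled below by
# d = 1 / sqrt(diag(precision)); up to a sign flip, d_i * P_ij * d_j is the
# partial correlation between stocks i and j, and only its magnitude is used
# to select and weight the edges of the graph.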
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
Edeleon4/PoolShark | flask/wildnature/WebProject1/obj/Release/Package/PackageTmp/libraries/pabuehle_utilities_general_v0.py | 2 | 57822 | # -*- coding: utf-8 -*-
###############################################################################
# Description:
# This is a collection of utility / helper functions.
# Note that most of these functions are not well tested, but are
# prototyping implementations. Also, this is my first time working with
# python, so things aren't as optimized as they could be.
#
# Typical meaning of variable names:
# lines,strings = list of strings
# line,string = single string
# xmlString = string with xml tags
# table = 2D row/column matrix implemented using a list of lists
# row,list1D = single row in a table, i.e. single 1D-list
# rowItem = single item in a row
# list1D = list of items, not necessarily strings
# item = single item of a list1D
# slotValue = e.g. "terminator" in: play <movie> terminator </movie>
# slotTag = e.g. "<movie>" or "</movie>" in: play <movie> terminator </movie>
# slotName = e.g. "movie" in: play <movie> terminator </movie>
# slot = e.g. "<movie> terminator </movie>" in: play <movie> terminator </movie>
#
# TODO:
# - change when possible to use list comprehensions
# - second utilities function with things that require external libraries (numpy, scipy, ...)
# - dedicated class for xmlString representation and parsing.
# - need to verify that all functions read/write/expect strings in utf-8 format
###############################################################################
#import os, time
import random, os, re, copy, sys, collections, pickle, pdb, stat, codecs, xmltodict #, matplotlib as plt
import matplotlib.pyplot as plt
from itertools import chain
from math import *
#if not on azure: import xmltodict
#################################################
#variable definitions
#################################################
questionWords = {}
questionWords["dede"] = ["wer", "wie", "was", "wann", "wo", "warum", "woher", "wen", "wem", "wohin", "wieso", "welche", "wieviel"]
questionWords["frfr"] = ["est-ce", "est - ce", "est ce", "estce", "quand", "pourquoi", "quel", "quelle", "que", "qui", "ou", "où", "combien", "comment", "quand"]
questionWords["eses"] = ["qué", "que", "quién", "quien", "quiénes", "quienes", "cuándo", "cuando", "cómo", "como", "dónde", "donde", "por qué", "por que", "cuánto", "cuanto", "cuántos", "cuantos", "cuántas", "cuantas", "cuánta", "cuanta" "cuál", "cual", "cuáles", "cuales", "cuál", "cual"]
questionWords["itit"] = ["chi", "che", "chiunque", "dove", "perché", "perche", "qualcuno", "quale", "quando", "quanto"] #"come"=="how"
questionWords["ptbr"] = ["aonde", "onde", "quando", "quanto", "quantos", "que", "quê", "quem", "porque", "qual", "quais", "como", "cade", "pode"]
questionWords["ptpt"] = questionWords["ptbr"]
#normalization for different languages. preserves the character counts.
textNormalizationLUT_dede = dict([ ["Ä","A"], ["Ö","O"], ["Ü","U"], ["ä","a"], ["ö","o"], ["ü","u"], ["ß","s"] ])
textNormalizationLUT_frfr = dict([ ["À","A"], ["à","a"], ["Â","A"], ["â","a"], ["Æ","A"], ["æ","a"], ["Ç","C"], ["ç","c"], ["È","E"], ["è","e"], ["É","E"], ["é","e"], ["Ê","E"], ["ê","e"], ["Ë","E"], ["ë","e"], ["Î","I"], ["î","i"], ["Ï","I"], ["ï","i"], ["Ô","O"], ["ô","o"], ["Œ","O"], ["œ","o"], ["Ù","U"], ["ù","u"], ["Û","U"], ["û","u"], ["Ü","U"], ["ü","u"], ["Ÿ","Y"], ["ÿ","y"] ])
textNormalizationLUT_eses = dict([ ["Á","A"], ["É","E"], ["Í","I"], ["Ñ","N"], ["Ó","O"], ["Ú","U"], ["Ü","U"], ["á","a"], ["é","e"], ["í","i"], ["ñ","n"], ["ó","o"], ["ú","u"], ["ü","u"], ["¿","?"], ["¡","!"] ])
textNormalizationLUT_itit = dict([ ["À","A"], ["È","E"], ["É","E"], ["Ì","I"],["Í","I"], ["Î","I"], ["Ò","O"], ["Ó","O"], ["Ù","U"], ["à","a"], ["è","e"], ["é","e"], ["ì","i"], ["í","i"], ["î","i"], ["ò","o"], ["ó","o"], ["ù","u"] ])
#################################################
# file access
#################################################
def readFile(inputFile):
#reading as binary, to avoid problems with end-of-text characters
#note that readlines() does not remove the line ending characters
with open(inputFile,'rb') as f:
lines = f.readlines()
#lines = [unicode(l.decode('latin-1')) for l in lines] convert to uni-code
return [removeLineEndCharacters(s) for s in lines];
def readBinaryFile(inputFile):
with open(inputFile,'rb') as f:
bytes = f.read()
return bytes
def readFirstLineFromFile(inputFile):
with open(inputFile,'rb') as f:
line = f.readline()
return removeLineEndCharacters(line);
#if getting memory errors, use 'readTableFileAccessor' instead
def readTable(inputFile, delimiter='\t', columnsToKeep=None):
lines = readFile(inputFile);
if columnsToKeep != None:
header = lines[0].split(delimiter)
columnsToKeepIndices = listFindItems(header, columnsToKeep)
else:
columnsToKeepIndices = None;
return splitStrings(lines, delimiter, columnsToKeepIndices)
class readFileAccessorBase:
def __init__(self, filePath, delimiter):
self.fileHandle = open(filePath,'rb')
self.delimiter = delimiter
self.lineIndex = -1
def __iter__(self):
return self
def __exit__(self, dummy1, dummy2, dummy3):
self.fileHandle.close()
def __enter__(self):
pass
def next(self):
self.lineIndex += 1
line = self.fileHandle.readline()
if line == "":
    #readline() returns an empty string once the end of the file is reached
    raise StopIteration
line = removeLineEndCharacters(line)
if self.delimiter != None:
    return splitString(line, delimiter=self.delimiter, columnsToKeepIndices=None)
else:
    return line
#iterator-like file accessor. use e.g. within "for line in readTableFileAccessor("input.txt"):" loop
class readTableFileAccessor(readFileAccessorBase):
def __init__(self, filePath, delimiter = '\t'):
readFileAccessorBase.__init__(self, filePath, delimiter)
class readFileAccessor(readFileAccessorBase):
def __init__(self, filePath):
readFileAccessorBase.__init__(self, filePath, None)
def writeFile(outputFile, lines, header=None, encoding=None):
if encoding == None:
with open(outputFile,'w') as f:
if header != None:
f.write("%s\n" % header)
for line in lines:
f.write("%s\n" % line)
else:
with codecs.open(outputFile, 'w', encoding) as f: #e.g. encoding=utf-8
if header != None:
f.write("%s\n" % header)
for line in lines:
f.write("%s\n" % line)
def writeTable(outputFile, table, header=None):
lines = tableToList1D(table) #better name: convertTableToLines
writeFile(outputFile, lines, header)
def writeBinaryFile(outputFile, data):
with open(outputFile,'wb') as f:
bytes = f.write(data)
return bytes
def loadFromPickle(inputFile):
with open(inputFile, 'rb') as filePointer:
data = pickle.load(filePointer)
return data
def saveToPickle(outputFile, data):
p = pickle.Pickler(open(outputFile,"wb"))
p.fast = True
p.dump(data)
def makeDirectory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
#removes just the files in the dir, not recursively
def makeOrClearDirectory(directory):
makeDirectory(directory)
files = os.listdir(directory)
for file in files:
filePath = directory +"/"+ file
os.chmod(filePath, stat.S_IWRITE )
if not os.path.isdir(filePath):
os.remove(filePath)
def removeWriteProtectionInDirectory(directory):
files = os.listdir(directory)
for file in files:
filePath = directory +"/"+ file
if not os.path.isdir(filePath):
os.chmod(filePath, stat.S_IWRITE )
def deleteFile(filePath):
if os.path.exists(filePath):
os.remove(filePath)
def deleteAllFilesInDirectory(directory, fileEndswithString):
for filename in getFilesInDirectory(directory):
if filename.lower().endswith(fileEndswithString):
deleteFile(directory + filename)
def getFilesInDirectory(directory, postfix = ""):
fileNames = [s for s in os.listdir(directory) if not os.path.isdir(directory+"/"+s)]
if postfix == "":
return fileNames
else:
return [s for s in fileNames if s.lower().endswith(postfix)]
def getDirectoriesInDirectory(directory):
return [s for s in os.listdir(directory) if os.path.isdir(directory+"/"+s)]
#################################################
# 1D list
#################################################
def isempty(listND):
if len(listND) == 0:
return True
return False
def find(list1D, func):
return [index for (index,item) in enumerate(list1D) if func(item)]
def listFindItems(list1D, itemsToFind):
indices = [];
list1DSet = set(list1D)
for item in itemsToFind:
if item in list1DSet:
index = list1D.index(item) #returns first of possibly multiple hits
indices.append(index)
return indices
def listFindItem(list1D, itemToFind):
index = [];
if itemToFind in list1D:
index = list1D.index(itemToFind) #returns first of possibly multiple hits
return index
#ex: list1D = ['this', 'is', 'a', 'test']; itemToFindList = ['is','a']
def listFindSublist(list1D, itemToFindList1D):
matchingStartItemIndices = []
nrItemsInItemToFindList = len(itemToFindList1D)
for startIndex in range(len(list1D)-nrItemsInItemToFindList+1):
endIndex = startIndex + nrItemsInItemToFindList -1
#print list1D[startIndex:endIndex+1]
if list1D[startIndex:endIndex+1] == itemToFindList1D:
matchingStartItemIndices.append(startIndex)
return matchingStartItemIndices
def listExists(stringToFind, strings, ignoreCase=False):
for string in strings:
if stringEquals(stringToFind, string, ignoreCase):
return True
return False
def listFindSubstringMatches(lines, stringsToFind, containsHeader, ignoreCase):
indices = []
for (index,line) in enumerate(lines):
if containsHeader and index==0:
indices.append(0)
else:
for stringToFind in stringsToFind:
if ignoreCase:
stringToFind = stringToFind.upper()
line = line.upper()
if line.find(stringToFind) >= 0:
indices.append(index)
break
return indices
def listSort(list1D, reverseSort=False, comparisonFct=lambda x: x):
indices = range(len(list1D))
tmp = sorted(zip(list1D,indices), key=comparisonFct, reverse=reverseSort)
list1DSorted, sortOrder = map(list, zip(*tmp))
return (list1DSorted, sortOrder)
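#example (added for illustration): listSort([3, 1, 2]) returns ([1, 2, 3], [1, 2, 0]),
#i.e. the sorted values plus the permutation of original indices that produces them,
#so list1D[sortOrder[i]] == list1DSorted[i] for every i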
def listExtract(list1D, indicesToKeep):
indicesToKeepSet = set(indicesToKeep)
return [item for index,item in enumerate(list1D) if index in indicesToKeepSet]
def listRemove(list1D, indicesToRemove):
indicesToRemoveSet = set(indicesToRemove)
return [item for index,item in enumerate(list1D) if index not in indicesToRemoveSet]
def listReverse(list1D):
return list1D[::-1]
def listRemoveDuplicates(strings):
newList = []
newListSet = set()
newListIndices = []
for index,string in enumerate(strings):
if string not in newListSet:
newList.append(string)
newListSet.add(string)
newListIndices.append(index)
return (newList, newListIndices)
def listRemoveEmptyStrings(strings):
indices = find(strings, lambda x: x!="")
return getRows(strings, indices);
def listRemoveEmptyStringsFromEnd(strings):
while len(strings)>0 and strings[-1] == "":
strings = strings[:-1]
return strings
def listIntersection(strings, referenceStrings):
#return how many items in "strings" also occur in "referenceStrings"
intersectingStrings = []
referenceSet = set(referenceStrings)
for string in strings:
if string in referenceSet:
intersectingStrings.append(string)
return intersectingStrings
def listsIdenticalExceptForPermutation(listA, listB):
if len(listA) != len(listB):
return False
#note: avoid sorting by making this histogram/dictionary based
listASorted = sorted(listA)
listBSorted = sorted(listB)
for (elemA, elemB) in zip(listASorted,listBSorted):
if elemA!=elemB:
return False
return True
def listAverage(numbers):
return 1.0 *sum(numbers) / len(numbers)
def listProd(numbers):
product = 1
for num in numbers:
product *= num
return product
#################################################
# 2D list (e.g. tables)
#################################################
def getColumn(table, columnIndex):
column = [];
for row in table:
column.append(row[columnIndex])
return column
def getRows(table, rowIndices):
newTable = [];
for rowIndex in rowIndices:
newTable.append(table[rowIndex])
return newTable
def getColumns(table, columnIndices):
newTable = [];
for row in table:
rowWithColumnsRemoved = [row[index] for index in columnIndices]
newTable.append(rowWithColumnsRemoved)
return newTable
#creates a longer table by splitting items of a given row
def splitColumn(table, columnIndex, delimiter):
newTable = [];
for row in table:
items = row[columnIndex].split(delimiter)
for item in items:
row = list(row) #make copy
row[columnIndex]=item
newTable.append(row)
return newTable
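#example (added for illustration): splitColumn([["a", "1;2"]], 1, ";") returns
#[["a", "1"], ["a", "2"]], i.e. one new row per split item in the chosen column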
def sortTable(table, columnIndexToSortOn, reverseSort=False, comparisonFct=lambda x: float(x[0])):
if len(table) == 0:
return []
columnToSortOnData = getColumn(table, columnIndexToSortOn)
(dummy, sortOrder) = listSort(columnToSortOnData, reverseSort, comparisonFct)
sortedTable = [];
for index in sortOrder:
sortedTable.append(table[index])
return sortedTable
def removeColumnsFromFileUsingGawk(headerString, columnNamesToKeep, inputFile, outputFile, delimiter='\t'):
header = headerString.split(delimiter);
columnIndicesToKeep = listFindItems(header, columnNamesToKeep)
removeColumnsFromFileUsingGawkGivenIndices(columnIndicesToKeep, inputFile, outputFile)
def removeColumnsFromFileUsingGawkGivenIndices(columnIndicesToKeep, inputFile, outputFile):
#Use this function when file is too large to be loaded into memory
gawkIndicesString = ""
for index in columnIndicesToKeep:
gawkIndicesString = gawkIndicesString + " $" + str(index+1) + ","
gawkIndicesString = gawkIndicesString[:-1]
gawkCmdString = "gawk -F'\t' 'BEGIN {OFS="+'"\t"'+"} {print" + gawkIndicesString + "}' " + inputFile + " > " + outputFile
os.popen(gawkCmdString)
def flattenTable(table):
return [x for x in reduce(chain, table)]
def tableToList1D(table, delimiter='\t'):
return [delimiter.join([str(s) for s in row]) for row in table]
# def convertTableToStrings(table, delimiter='\t'):
# delimiterSeparatedLines = [];
# for row in table:
# #row = [str(w) for w in row]
# delimiterSeparatedLine = delimiter.join(map(str,row))
# delimiterSeparatedLines.append(delimiterSeparatedLine)
# return delimiterSeparatedLines
def getNthListElements(list2D, index):
return [list1D[index] for list1D in list2D]
#map label names to integers
def parseValueKeyTable(valueKeyTable):
valueToKeyLUT = dict()
keyToValueLUT = dict()
for line in valueKeyTable:
value = int(line[0])
key = line[1]
valueToKeyLUT[value] = key
keyToValueLUT[key] = value
return(keyToValueLUT, valueToKeyLUT)
#################################################
# ND list (e.g. tables)
#################################################
def endStripList(listND, itemToRemove=''):
if listND == []:
return listND
currPos = len(listND)-1
while listND[currPos] == itemToRemove:
currPos -= 1
if currPos<0:
break
return [item for index,item in enumerate(listND) if index <= currPos]
#################################################
# string
#################################################
def insertInString(string, textToKeepUntilPos, textToKeepFromPos, stringToInsert=None):
    #supports both insertInString(s, pos, textToInsert) and the 4-argument form;
    #the original file defined two same-named functions, shadowing the 3-argument one
    if stringToInsert is None:
        stringToInsert = textToKeepFromPos
        textToKeepFromPos = textToKeepUntilPos
    return string[:textToKeepUntilPos] + stringToInsert + string[textToKeepFromPos:]
def removeMultipleSpaces(string):
return re.sub('[ ]+' , ' ', string)
def removeLineEndCharacters(line):
if line.endswith('\r\n'):
return line[:-2]
elif line.endswith('\n'):
return line[:-1]
else:
return line
def replaceNthWord(string, wordIndex, wordToReplaceWith):
words = string.split()
words[wordIndex] = wordToReplaceWith
return " ".join(words)
def removeWords(string, wordsToRemove, ignoreCase=False):
newWords = []
for word in string.split():
if not listExists(word, wordsToRemove, ignoreCase):
newWords.append(word)
return " ".join(newWords)
def removeNthWord(string, wordIndex):
words = string.split()
if wordIndex == 0:
stringNew = words[1:]
elif wordIndex == len(words)-1:
stringNew = words[:-1]
else:
stringNew = words[:wordIndex] + words[wordIndex+1:]
#stringNew = " ".join(stringNew)
#stringNew = re.sub('[ \t]+' , ' ', stringNew) #replace multiple spaces or tabs with a single space
return " ".join(stringNew)
def splitString(string, delimiter='\t', columnsToKeepIndices=None):
if string == None:
return None
items = string.split(delimiter)
if columnsToKeepIndices != None:
items = getColumns([items], columnsToKeepIndices)
items = items[0]
return items;
def splitStrings(strings, delimiter, columnsToKeepIndices=None):
table = [splitString(string, delimiter, columnsToKeepIndices) for string in strings]
return table;
def spliceString(string, textToKeepStartPositions, textToKeepEndPositions):
stringNew = "";
for (startPos, endPos) in zip(textToKeepStartPositions,textToKeepEndPositions):
stringNew = stringNew + string[startPos:endPos+1]
return stringNew
def findFirstSubstring(string, stringToFind, ignoreCase=False):
if ignoreCase:
string = string.upper();
stringToFind = stringToFind.upper();
return string.find(stringToFind)
def findMultipleSubstrings(string, stringToFind, ignoreCase=False):
if ignoreCase:
string = string.upper();
stringToFind = stringToFind.upper();
matchPositions = [];
pos = string.find(stringToFind)
while pos >= 0:
matchPositions.append(pos)
pos = string.find(stringToFind, pos + 1)
return matchPositions
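#illustrative usage sketch (not part of the original code); the search restarts
#one character after each hit
def exampleFindMultipleSubstrings():
    positions = findMultipleSubstrings("banana", "an")
    #positions == [1, 3]
    return positions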
def findMultipleSubstringsInMultipleStrings(string, stringsToFind, ignoreCase=False):
matches = []
for (stringToFindIndex,stringToFind) in enumerate(stringsToFind):
matchStartPositions = findMultipleSubstrings(string, stringToFind, ignoreCase)
for matchStartPos in matchStartPositions:
matchEndPos = matchStartPos + len(stringToFind)
matches.append([matchStartPos,matchEndPos,stringToFindIndex])
return matches
def findOccurringStringsIndices(string, stringsToFind):
matchIndices = []
for (stringToFindIndex,stringToFind) in enumerate(stringsToFind):
if string.find(stringToFind) >= 0:
matchIndices.append(stringToFindIndex)
return matchIndices
def regexMatch(string, regularExpression, matchGroupIndices):
regexMatches = re.match(regularExpression, string)
if regexMatches != None:
matchedStrings = [regexMatches.group(i) for i in matchGroupIndices]
else:
matchedStrings = [None]*len(matchGroupIndices)
if len(matchGroupIndices) == 1:
matchedStrings = matchedStrings[0]
return matchedStrings
def containsOnlyRegularAsciiCharacters(string):
return all(ord(c) < 128 for c in string)
#remove all control characters except for TAB
#see: http://www.asciitable.com/
def removeControlCharacters(string):
    chars = [c for c in string if not (ord(c)>=0 and ord(c)<=8)]
    chars = [c for c in chars if not (ord(c)>=10 and ord(c)<=31)]
return "".join(chars)
def stringEquals(string1, string2, ignoreCase=False):
if ignoreCase:
string1 = string1.upper()
string2 = string2.upper()
return string1 == string2
def ToIntegers(list1D):
return [int(float(x)) for x in list1D]
def Round(list1D):
return [round(x) for x in list1D]
def ToFloats(list1D):
return [float(x) for x in list1D]
def ToStrings(list1D):
return [str(x) for x in list1D]
#NOTE: could just call function ToIntegers, input format is irrelevant
#def stringsToIntegers(strings):
# return [int(s) for s in strings]
#def stringsToFloats(strings):
# return [float(s) for s in strings]
#def floatsToStrings(floats):
# return [str(f) for f in floats]
#################################################
# xmlString
# slotValue = e.g. "terminator" in: play <movie> terminator </movie>
# slotTag = e.g. "<movie>" or "</movie>" in: play <movie> terminator </movie>
# slotName = e.g. "movie" in: play <movie> terminator </movie>
# slot = e.g. "<movie> terminator </movie>" in: play <movie> terminator </movie>
#
# Note that the functionality around xmlStrings is a bit brittle since some functions were
# written assuming consistent xml tags (e.g. whitespace before '<' tag open characters)
#################################################
def getSlotOpenTag(slotName):
return "<"+slotName+">"
def getSlotCloseTag(slotName):
return "</"+slotName+">"
def getSlotTag(slotName, slotValue):
return getSlotOpenTag(slotName) + " " + slotValue + " " + getSlotCloseTag(slotName)
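#example (not part of the original code):
#  getSlotTag("movie", "terminator") returns "<movie> terminator </movie>"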
def normalizeXmlString(xmlString, mode='simple'):
if mode == 'simple':
#make sure there is a space before each '<' and after each '>', etc.
#then remove multiple white spaces, as well as trailing spaces
xmlString = xmlString.replace('<', ' <')
xmlString = xmlString.replace('>', '> ')
xmlString = xmlString.replace('?', ' ? ')
xmlString = xmlString.replace('!', ' ! ')
xmlString = xmlString.replace('.', ' . ')
xmlString = removeMultipleSpaces(xmlString)
xmlString = xmlString.strip()
else:
raise Exception('Mode unknown: ' + mode)
return xmlString
def isXmlTag(string):
if parseXmlTag(string) != None:
return True
else:
return False
def parseXmlTag(string):
isTag = False
if len(string)>2:
(tagName, isOpenTag, isCloseTag) = (None, False, False)
if string[0:2]=="</" and string[-1]==">":
(isTag, isCloseTag, tagName) = (True, True, string[2:-1])
elif string[0]=="<" and string[-1]==">":
(isTag, isOpenTag, tagName) = (True, True, string[1:-1])
if isTag == True:
return (tagName, isOpenTag, isCloseTag)
else:
return None
def renameSlotName(xmlString, oldSlotName, newSlotName):
xmlString = xmlString.replace(getSlotOpenTag(oldSlotName), getSlotOpenTag(newSlotName))
xmlString = xmlString.replace(getSlotCloseTag(oldSlotName), getSlotCloseTag(newSlotName))
return xmlString
def replaceSlotValues(xmlString, slotNameToReplace, newSlotValue):
keepLooping = True;
newXmlString = xmlString
while keepLooping:
keepLooping = False;
slots = extractSlots(newXmlString)
for slot in slots:
slotName = slot[1]
if slotName == slotNameToReplace:
(slotValueStartPos, slotValueEndPos) = slot[3:5]
oldXmlString = newXmlString
newXmlString = insertInString(newXmlString, slotValueStartPos+1, slotValueEndPos, newSlotValue)
if oldXmlString != newXmlString:
keepLooping = True;
break #break since start/end positions in "tags" have changed
return newXmlString
def replaceSlotXmlStringWithSlotName(xmlString, slotNames, slotNamePrefix="SLOT_"):
newXmlString = xmlString
for slotName in slotNames:
#match everything except for the "</" characters
        newXmlString = re.sub("<" + slotName + ">(?:(?!</).)*</" + slotName + ">", slotNamePrefix + slotName.upper(), newXmlString, flags=re.VERBOSE)
return newXmlString
def slotsFormattedCorrectly(slots, verbose=False):
if len(slots) % 2 != 0:
if verbose:
print "WARNING: odd number of slot open/close tags found: " + str(slots)
return(False)
slotNameExpected = None;
for slot in slots:
slotName = slot[0]
isOpenTag = slot[3]
if (slotNameExpected==None and not isOpenTag):
if verbose:
print "WARNING: open tag expected but instead found closing tag: " + str(slots)
return(False)
elif (not isOpenTag and slotNameExpected != slotName):
if verbose:
print "WARNING: expected closing and opening tag to have same slot name: ", (slotNameExpected,tag)
return(False)
if isOpenTag:
slotNameExpected = slotName
else:
slotNameExpected = None
return(True)
def xmlStringCanBeParsed(xmlString):
#Note: The MLGTools and/or Bitetools crashes if a hash tag is in the data
#if xmlString.find("#") >= 0 or xmlString.find(" ") >= 0 or xmlString != xmlString.strip() or not containsOnlyRegularAsciiCharacters(xmlString):
try:
extractSlots(xmlString)
return True
except:
return False
def extractSlotsHelper(xmlString, validateSlots=True):
slotStartPositions = findMultipleSubstrings(xmlString, '<')
slotEndPositions = findMultipleSubstrings(xmlString, '>')
#check if all startPositions < endPositions
if (len(slotStartPositions) != len(slotEndPositions)):
#assumes no < or > characters in query itself just in tag
raise Exception("Unequal number of '<' and '>' characters: " + xmlString)
for (slotStartPos, slotEndPos) in zip(slotStartPositions, slotEndPositions):
if slotStartPos>slotEndPos:
raise Exception("Found a '>' before a '<' character: " + xmlString)
if slotStartPos==slotEndPos-1:
raise Exception("Found an empty tag (i.e. '<>'): " + xmlString)
#loop over all tags and add to list
slots = []
for (slotStartPos, slotEndPos) in zip(slotStartPositions, slotEndPositions):
slotName = xmlString[slotStartPos+1:slotEndPos]
if slotName[0] == '/':
slotName = slotName[1:]
boIsOpenTag = False
else:
boIsOpenTag = True
if slotName.find(' ') >= 0:
raise Exception("Slot names should not contain any whitespaces: " + xmlString)
slots.append((slotName, slotStartPos, slotEndPos, boIsOpenTag))
#check if identified slots are all formatted correctly
if validateSlots and slotsFormattedCorrectly(slots)==False:
raise Exception("Identified slots for |%s| nor formatted correctly: " + str(slots))
return slots
def extractSlots(xmlString, validateSlots=True):
newSlots = [];
slots = extractSlotsHelper(xmlString, validateSlots)
    for (slotIndex,slot) in enumerate(slots): #slots alternate between open and close tags
isOpenTag = slot[3]
if slotIndex % 2 == 0:
assert(isOpenTag)
tagOpenSlotName = slot[0]
tagOpenOuterPos = slot[1]
tagOpenInnerPos = slot[2]
else:
tagCloseSlotName = slot[0]
assert(not isOpenTag)
assert(tagOpenSlotName == tagCloseSlotName)
tagCloseOuterPos = slot[2]
tagCloseInnerPos = slot[1]
slotValue = xmlString[tagOpenInnerPos+1:tagCloseInnerPos].strip()
newSlots.append((slotValue, tagCloseSlotName, tagOpenOuterPos, tagOpenInnerPos, tagCloseInnerPos, tagCloseOuterPos))
return newSlots
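#illustrative usage sketch (not part of the original code); each returned tuple is
#(slotValue, slotName, tagOpenOuterPos, tagOpenInnerPos, tagCloseInnerPos, tagCloseOuterPos)
def exampleExtractSlots():
    slots = extractSlots("play <movie> terminator </movie>")
    #slots == [('terminator', 'movie', 5, 11, 24, 31)]
    return slots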
def extractSlotValues(xmlStrings):
slotValues = {}
for xmlString in xmlStrings:
slots = extractSlots(xmlString)
for slot in slots:
slotValue = slot[0]
slotName = slot[1]
if slotName not in slotValues:
slotValues[slotName] = []
slotValues[slotName].append(slotValue)
return slotValues
def removeTagsFromXmlString(xmlString, slotNamesToKeepOrRemove, keepOrRemove="keep", boRemoveMultipleSpaces=True, boRemovePrePostFixSpaces=True):
assert(keepOrRemove=="keep" or keepOrRemove=="remove")
slots = extractSlots(xmlString)
assert(slots != None) #todo: need to check first if can be parsed, THEN either parse or run code below.
#if slots == None:
# print 'Warning "removeTagsFromXmlString": could not parse sentence. Hence simply removing </, < or > characters" ' + xmlString
# xmlStringNoOpenCloseCharacters = xmlString
# xmlStringNoOpenCloseCharacters = xmlStringNoOpenCloseCharacters.replace('</','')
# xmlStringNoOpenCloseCharacters = xmlStringNoOpenCloseCharacters.replace('<','')
# xmlStringNoOpenCloseCharacters = xmlStringNoOpenCloseCharacters.replace('>','')
# return xmlStringNoOpenCloseCharacters
textToKeepStartPos = [0]
textToKeepEndPos = []
for slot in slots:
slotName = slot[1]
(tagOpenOuterPos, tagOpenInnerPos, tagCloseInnerPos, tagCloseOuterPos) = slot[2:6]
if (keepOrRemove=="remove") and (slotName in slotNamesToKeepOrRemove):
boRemoveTag=True;
elif (keepOrRemove=="keep") and (slotName not in slotNamesToKeepOrRemove):
boRemoveTag=True;
else:
boRemoveTag = False;
if boRemoveTag:
textToKeepEndPos.append(tagOpenOuterPos-1)
textToKeepStartPos.append(tagOpenInnerPos+1)
textToKeepEndPos.append(tagCloseInnerPos-1)
textToKeepStartPos.append(tagCloseOuterPos+1)
textToKeepEndPos.append(len(xmlString)-1)
#create new string
xmlStringNew = spliceString(xmlString, textToKeepStartPos, textToKeepEndPos).strip()
if boRemoveMultipleSpaces:
xmlStringNew = removeMultipleSpaces(xmlStringNew)
if boRemovePrePostFixSpaces:
xmlStringNew = xmlStringNew.strip()
#sanity check
slotsNew = extractSlots(xmlStringNew)
for slot in slotsNew:
if keepOrRemove=="keep" and slot[1] not in slotNamesToKeepOrRemove:
pdb.set_trace()
if keepOrRemove=="remove" and slot[1] in slotNamesToKeepOrRemove:
pdb.set_trace()
return xmlStringNew
def removeTagsFromXmlStrings(xmlStrings, slotNamesToKeepOrRemove, keepOrRemove="keep", boRemoveMultipleSpaces=True, boRemovePrePostFixSpaces=True):
return [removeTagsFromXmlString(s, slotNamesToKeepOrRemove, keepOrRemove, boRemoveMultipleSpaces, boRemovePrePostFixSpaces) for s in xmlStrings]
def removeAllTagsFromXmlString(xmlString, boRemoveMultipleSpaces=True, boRemovePrePostFixSpaces=True):
slotNamesToRemove = getSlotNameCounts([xmlString]).keys()
return removeTagsFromXmlString(xmlString, slotNamesToRemove, "remove", boRemoveMultipleSpaces, boRemovePrePostFixSpaces)
def removeAllTagsFromXmlStrings(xmlStrings, boRemoveMultipleSpaces=True, boRemovePrePostFixSpaces=True):
strings = []
for xmlString in xmlStrings:
strings.append(removeAllTagsFromXmlString(xmlString, boRemoveMultipleSpaces, boRemovePrePostFixSpaces))
return strings
def getSlotNameCounts(xmlStrings):
if not isinstance(xmlStrings, (list)):
xmlStrings = [xmlStrings]
slotCounter = collections.Counter()
for xmlString in xmlStrings:
slots = extractSlots(xmlString)
for slot in slots:
slotName = slot[1]
slotCounter[slotName] += 1
return slotCounter
def getNrSentencesWithSlots(xmlStrings):
counterTaggedSentences = 0
for xmlString in xmlStrings:
slots = extractSlots(xmlString)
if len(slots) > 0:
counterTaggedSentences += 1
return counterTaggedSentences
def getNrSentencesWithoutSlots(xmlStrings):
return len(xmlStrings) - getNrSentencesWithSlots(xmlStrings)
def findSimilarSlot(slotToFind, slotList, ignoreSlotValue=True, ignoreSlotName=True):
if ignoreSlotValue==False or ignoreSlotName==False:
print "Not supported yet"
for index,slot in enumerate(slotList):
#ignore slotValue and slotName, compare for equality: tagOpenOuterPos, tagOpenInnerPos, tagCloseInnerPos, tagCloseOuterPos
if slotToFind[2:]==slot[2:]:
return index
return -1
def convertXmlStringToIOB(xmlString, addSentenceBeginEndMarkers=False):
currentLabel = "O"
wordLabelPairs = [];
#making sure each < tag has a leading white-space, and each > has a trailing whitespace.
#then calling split which uses whitespace as separator.
words = xmlString.replace('<',' <').replace('>','> ').strip().split()
for word in words:
#assert(isXmlTag(word))
if '<' in word[1:-1] or '>' in word[1:-1]:
raise Exception("Xml string contains stray '<' or '>' characters: " + xmlString)
if isXmlTag(word):
(tagName, isOpenTag, isCloseTag) = parseXmlTag(word)
else:
(tagName, isOpenTag, isCloseTag) = (None,None,None)
if isOpenTag:
currentLabel = tagName
writeBeginMarker = True
elif isCloseTag:
currentLabel = "O"
else:
if currentLabel == "O":
labelToWrite = currentLabel
elif writeBeginMarker:
labelToWrite = "B-"+currentLabel
writeBeginMarker = False
else:
labelToWrite = "I-"+currentLabel
wordLabelPairs.append([word, labelToWrite])
if addSentenceBeginEndMarkers:
wordLabelPairs.insert(0, ["BOS","O"])
wordLabelPairs.append(["EOS", "O"])
return wordLabelPairs
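#illustrative usage sketch (not part of the original code); tag tokens are
#consumed and the remaining words receive O/B-/I- labels
def exampleConvertXmlStringToIOB():
    wordLabelPairs = convertXmlStringToIOB("play <movie> the terminator </movie>")
    #wordLabelPairs expected to be [['play', 'O'], ['the', 'B-movie'], ['terminator', 'I-movie']]
    return wordLabelPairs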
def convertXmlStringsToIOB(xmlStrings):
iobFormat = []
for xmlString in xmlStrings:
wordLabelPairs = convertXmlStringToIOB(xmlString)
for wordLabelPair in wordLabelPairs:
iobFormat.append(" ".join(wordLabelPair))
iobFormat.append("")
return iobFormat[:-1]
def extractSentencesFromIOBFormat(iobLines):
sentences = []
sentence = ""
for iobLine in iobLines:
if iobLine =="":
if sentence != "":
sentences.append(sentence.strip())
sentence = ""
else:
word = iobLine.split()[0]
sentence += " " + word
return sentences
def parseXmlFile(xmlFile):
s = readFile(xmlFile)
s = " ".join(s)
return xmltodict.parse(s)
#################################################
# randomize
#################################################
def randomizeList(listND, containsHeader=False):
if containsHeader:
header = listND[0]
listND = listND[1:]
random.shuffle(listND)
if containsHeader:
listND.insert(0, header)
return listND
def getRandomListElement(listND, containsHeader=False):
if containsHeader:
index = getRandomNumber(1, len(listND)-1)
else:
index = getRandomNumber(0, len(listND)-1)
return listND[index]
def getRandomNumbers(low, high):
randomNumbers = range(low,high+1)
random.shuffle(randomNumbers)
return randomNumbers
def getRandomNumber(low, high):
randomNumber = random.randint(low,high) #getRandomNumbers(low, high)
return randomNumber #s[0]
def subsampleList(listND, maxNrSamples):
indices = range(len(listND))
random.shuffle(indices)
nrItemsToSample = min(len(indices), maxNrSamples)
return [listND[indices[i]] for i in range(nrItemsToSample)]
def randomSplit(list1D, ratio):
indices = range(len(list1D))
random.shuffle(indices)
nrItems = int(round(ratio * len(list1D)))
listA = [list1D[i] for i in indices[:nrItems]]
listB = [list1D[i] for i in indices[nrItems:]]
return (listA,listB)
#################################################
# QEPConsole
#################################################
def parseQEPConsoleOutput(qepOutputFile):
qepInfo = None
qepInfos = []
fileObject = open(qepOutputFile,'r')
for line in fileObject:
#check if start of a new query
if line.startswith("Query: "):
regularExpression = "Query: \{(.*)\}$"
query = regexMatch(line, regularExpression, [1])
if query != None:
if qepInfo != None:
qepInfos.append(qepInfo)
qepInfo = {}
qepInfo["query"] = query
qepInfo["edges"] = []
#check if Impressions
elif line.startswith("Impressions"):
startPos = line.find('[')
endPos = line.find(']')
qepInfo["Impressions"] = int(line[startPos+1:endPos])
#check if edge
elif line.startswith("Edge = ["):
regularExpression = "Edge = \[(.*)\]\{UL:(.*)\}$"
(urlCount,url) = regexMatch(line, regularExpression, [1,2])
if urlCount != None:
qepInfo["edges"].append((urlCount,url))
qepInfos.append(qepInfo)
return qepInfos
#################################################
# dictionaries
#################################################
def increaseDictValueByOne(dictionary, key, initialValue=0):
    if key in dictionary:
dictionary[key] += 1;
else:
dictionary[key] = initialValue + 1;
def sortDictionary(dictionary, sortIndex=0, reverseSort=False):
return sorted(dictionary.items(), key=lambda x: x[sortIndex], reverse=reverseSort)
def getDictionary(keys, values, boConvertValueToInt = True):
dictionary = {}
for key,value in zip(keys, values):
if (boConvertValueToInt):
value = int(value)
dictionary[key] = value
return dictionary
def getStringDictionary(keys, values):
dictionary = {}
for key,value in zip(keys, values):
dictionary[key] = value
return dictionary
def dictionaryToTable(dictionary):
return (dictionary.items())
#################################################
# collections.Counter()
#################################################
def countFrequencies(list1D):
frequencyCounts = collections.Counter()
for item in list1D:
frequencyCounts[item] += 1
return frequencyCounts
def countWords(sentences, ignoreCase=True):
frequencyCounts = collections.Counter()
for sentence in sentences:
words = sentence.split()
for word in words:
if ignoreCase:
word = word.lower()
frequencyCounts[word] += 1
return frequencyCounts
def convertCounterToList(counter, threshold=None):
sortedKeyValuePairs = counter.most_common()
if threshold == None:
return sortedKeyValuePairs
else:
newSortedKeyValuePairs = [];
for keyValuePair in sortedKeyValuePairs:
if keyValuePair[1] >= threshold:
newSortedKeyValuePairs.append(keyValuePair)
else:
break
return newSortedKeyValuePairs
#################################################
# confusion matrix
#################################################
def initConfusionMatrix(rowColumnNames):
confMatrix = {}
for s in rowColumnNames: #actual
confMatrix[s] = {}
for ss in rowColumnNames: #estimated
confMatrix[s][ss] = 0
return confMatrix
def printConfusionMatrix(confMatrix, rowColumnNames):
n = 6
columnWidth = max(2*n, max([len(s) for s in rowColumnNames]))
line = "(Row=actual)".ljust(columnWidth)
for columnName in rowColumnNames:
line += " | " + columnName.center(n)
line += " | " + "SUM".center(n)
print line
for actualTagName in rowColumnNames:
rowSum = 0
line = actualTagName.rjust(columnWidth)
for estimatedTagName in rowColumnNames:
value = confMatrix[actualTagName][estimatedTagName]
rowSum += value
line += " | " + str(value).center(max(n,len(estimatedTagName)))
line += " || " + str(rowSum).center(n)
print line
def plotConfusionMatrix(confMat, title='Confusion matrix', labelNames = None, colorMap=plt.cm.jet, vmin=None, vmax=None):
plt.imshow(confMat, interpolation='nearest', cmap=colorMap, vmin=vmin, vmax=vmax)
plt.title(title)
plt.ylabel('Ground truth')
plt.xlabel('Prediction')
if labelNames:
tick_marks = np.arange(len(labelNames))
plt.xticks(tick_marks, labelNames, rotation=45, ha='right')
plt.yticks(tick_marks, labelNames)
plt.colorbar()
plt.tight_layout()
def analyseConfusionMatrix(confMatrix, rowColumnNames=None):
if rowColumnNames == None:
rowColumnNames = ["pos","neg"]
posName = rowColumnNames[0]
negName = rowColumnNames[1]
tp = confMatrix[posName][posName]
fp = confMatrix[negName][posName]
fn = confMatrix[posName][negName]
return computePrecisionRecall(tp, fp, fn)
#warning: bit hacky, not well tested yet
def analyseConfusionMatrixND(confMatrix, tagNamesSubset=None):
tp = fp = tn = fn = 0
confMatrixNames = confMatrix.keys()
#only compute precision/recall etc for subset of rows/columns
if tagNamesSubset != None:
confMatrix = copy.deepcopy(confMatrix)
for actualTagName in confMatrixNames:
for estimatedTagName in confMatrixNames:
if estimatedTagName != "None" and not estimatedTagName in tagNamesSubset:
confMatrix[actualTagName]["None"] += confMatrix[actualTagName][estimatedTagName]
confMatrix[actualTagName][estimatedTagName] = 0
for actualTagName in confMatrixNames:
if not actualTagName=="None" and not actualTagName in tagNamesSubset:
confMatrix[actualTagName]["None"] = 0
#compute true positive (tp), true negative (tn), false positive (fp) and false negative (fn)
for actualTagName in confMatrixNames:
for estimatedTagName in confMatrixNames:
if estimatedTagName == "None" and actualTagName == "None":
tn += confMatrix[actualTagName][estimatedTagName]
elif estimatedTagName == "None" and actualTagName != "None":
fn += confMatrix[actualTagName][estimatedTagName]
elif estimatedTagName != "None" and actualTagName == estimatedTagName:
tp += confMatrix[actualTagName][estimatedTagName]
elif estimatedTagName != "None" and actualTagName != estimatedTagName:
fp += confMatrix[actualTagName][estimatedTagName]
(precision,recall) = computePrecisionRecall(tp, fp, fn)
return (precision, recall, tp, fp, tn, fn)
def getF1Score(precision,recall):
return 2*precision*recall/(precision+recall)
def computePrecisionRecall(tp, fp, fn):
if (tp+fp)>0:
precision = round(100.0 * tp / (tp+fp), 2)
else:
precision = -1
positives = (tp + fn)
if positives>0:
recall = round(100.0 * tp / positives, 2)
else:
recall = -1
return (precision,recall)
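#illustrative usage sketch (not part of the original code): with tp=8, fp=2, fn=4,
#precision = 100*8/10 = 80.0 and recall = 100*8/12 = 66.67 (rounded to 2 decimals)
def exampleComputePrecisionRecall():
    (precision, recall) = computePrecisionRecall(8, 2, 4)
    #(precision, recall) == (80.0, 66.67)
    return (precision, recall)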
def computeSinglePrecisionRecall(threshold, groundTruthLabels, classifierScore, weights=None, queries=None):
#print "*****************************"
#init
classifierScore = [float(n) for n in classifierScore]
if weights != None:
weights = [float(n) for n in weights]
else:
weights = [1] * len(classifierScore)
if queries == None:
queries = ["Queries not provided..."] * len(classifierScore)
assert(len(groundTruthLabels) == len(classifierScore) == len(weights) == len(queries))
#count true positive and false positives
p = 0
n = 0
tp = fp = tn = fn = 0
tpW = fpW = tnW = fnW = 0
zeroWeightCounter = 0
for (query,groundTruthLabel,classificationScore,weight) in zip(queries,groundTruthLabels,classifierScore,weights):
#if classificationScore == 1:
# print groundTruthLabel, classificationScore, weight, query
#init
classificationLabel = int(classificationScore>threshold)
if weight == 0:
zeroWeightCounter += 1
continue
#count number of positives and negatives in test set
if groundTruthLabel == 1:
p+=1
else:
n+=1
#compute tp, fp, tn, and fn
if groundTruthLabel == classificationLabel:
if classificationLabel == 1:
tp += 1
tpW += weight
elif classificationLabel == 0:
tn += 1
tnW += weight
else:
                raise Exception("classificationLabel should be 0 or 1")
#print "CORRECT: GTLabel=%i, classificationScore=%f, weight=%i, query=%s" % (groundTruthLabel, classificationScore, weight, query)
else:
if classificationLabel == 1:
fp += 1
fpW += weight
elif classificationLabel == 0:
fn += 1
fnW += weight
else:
                raise Exception("classificationLabel should be 0 or 1")
#if classificationLabel==1: # and classificationLabel==0:
# print "WRONG: GTLabel=%i, classificationScore=%f, weight=%i, query=%s" % (groundTruthLabel, classificationScore, weight, query)
#compute p/r
assert((tp + fn) == p)
assert((fp + tn) == n)
precision,recall = computePrecisionRecall(tpW, fpW, fnW)
#precision = 100.0 * tpW / (tpW + fpW)
#recall = 100.0 * tpW / (tpW + fnW)
acc = 100.0 * (tpW + tnW) / (tpW + tnW + fpW + fnW)
return (precision, recall, acc, tpW, fpW, tnW, fnW, zeroWeightCounter)
def computePrecisionRecallVectors(thresholds, groundTruthLabels, classifierScore, weights=None, queries=None):
precisionVec = []
recallVec = []
accVec = []
for threshold in thresholds:
(precision, recall, acc) = computeSinglePrecisionRecall(threshold, groundTruthLabels, classifierScore, weights, queries)[0:3]
precisionVec.append(precision)
recallVec.append(recall)
accVec.append(acc)
return (precisionVec, recallVec, accVec)
def plotPrecisionRecallCurve(precision, recall):
area = auc(recall, precision)
plt.plot(recall, precision, label='Precision-recall curve')
plt.ylabel('Precision')
plt.xlabel('Recall')
plt.ylim([-0.02, 1.02])
plt.xlim([-0.02, 1.02])
plt.title('AUC=%0.2f' % area)
#plt.legend(loc="upper right")
plt.show()
#################################################
# sentence patterns
#################################################
def containsRegexMetaCharacter(string, regexChars = ["\\", "^", "?", ".", "+", "*", "(", ")", "[", "]", "{", "}", "|"]):
for regexChar in regexChars:
if string.find(regexChar)>=0:
return True
return False
def getRegularExpressionsFromSentencePatterns(sentencePatterns, tagNames, placeHolderFormatString, placeHolderRegEx):
return [getRegularExpressionFromSentencePattern(s, tagNames, placeHolderFormatString, placeHolderRegEx) for s in sentencePatterns]
def getRegularExpressionFromSentencePattern(sentencePattern, slotNames, placeHolderFormatString, placeHolderRegEx):
#Note this assumes at most one place holder per sentence pattern
#Example for a placeHolderRegEx which matches 1-3 words: "((\w+)( \w+){0,2})"
sentencePatternTag = None
for tagName in tagNames:
placeHolder = placeHolderFormatString.format(tagName.upper());
if sentencePattern.find(placeHolder)<0:
continue
sentencePattern = sentencePattern.replace(placeHolder, placeHolderRegEx)
sentencePattern = removeMultipleSpaces(sentencePattern) + "$"
sentencePatternTag = tagName
break
assert(sentencePatternTag != None)
sentencePattern = re.compile(sentencePattern)
return(sentencePattern, sentencePatternTag)
#################################################
# processes
# (start process using: p = subprocess.Popen(cmdStr))
#################################################
def isProcessRunning(processID):
status = processID.poll();
if status is None:
return True
else:
return False
def countNumberOfProcessesRunning(processIDs):
return sum([isProcessRunning(p) for p in processIDs])
#################################################
# python environment
#################################################
def clearAll():
#not sure if this is working
sys.modules[__name__].__dict__.clear()
#all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__") and var != "clearall"]
#for var in all:
# print var
# #del globals()[var]
# del var
#################################################
# arguments
#################################################
def printParsedArguments(options):
    print "Arguments parsed in using the command line:"
    for varName in [v for v in dir(options) if not callable(getattr(options,v)) and v[0] != '_']:
        print "   %s = %s" % (varName, getattr(options, varName))
def optionParserSplitListOfValues(option, opt, value, parser):
setattr(parser.values, option.dest, value.split(','))
#################################################
# url
#################################################
def removeURLPrefix(url, urlPrefixes = ["https://www.", "http://www.", "https://", "http://", "www."]):
for urlPrefix in urlPrefixes:
if url.startswith(urlPrefix):
url = url[len(urlPrefix):]
break
return url
def urlsShareSameRoot(url, urlRoot):
url = removeURLPrefix(url)
urlRoot = removeURLPrefix(urlRoot)
if url.startswith(urlRoot):
return True
else:
return False
#################################################
# numpy
#################################################
#def convertListToNumPyArray(list1D, delimiter, columnIndices=None):
# table = []
# for line in list1D:
# row = line.split(delimiter)
# if columnIndices != None:
# row = [row[i] for i in columnIndices]
# table.append(row)
# return np.array(table);
#################################################
# other
#################################################
def printProgressMsg(msgFormatString, currentValue, maxValue, modValue):
if currentValue % modValue == 1:
text = "\r"+msgFormatString.format(currentValue, maxValue) #"\rPercent: [{0}] {1}% {2}".format("#"*block + "-"*(barLength-block), round(progress*100,2), status)
sys.stdout.write(text)
sys.stdout.flush()
def displayProgressBarPrompt(progress, status = ""):
barLength = 30
if isinstance(progress, int):
progress = float(progress)
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1}% {2}".format("#"*block + "-"*(barLength-block), round(progress*100,2), status)
sys.stdout.write(text)
sys.stdout.flush()
def numToString(num, length, paddingChar = '0'):
if len(str(num)) >= length:
return str(num)[:length]
else:
return str(num).ljust(length, paddingChar)
def linspace(startVal, stopVal, stepVal):
values = []
counter = 0
newVal = startVal;
while newVal < stopVal:
values.append(newVal)
counter += 1
newVal = startVal+stepVal*counter;
#div = round(1/stepVal)
#newVal = round(newVal * div) / div #avoid floating number precision problems
return(values)
def extractAllNGrams(words,n):
assert(n>0)
startPos = 0
endPos = len(words)-n
return [(words[pos:pos+n]) for pos in range(startPos, endPos+1)]
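#example (not part of the original code):
#  extractAllNGrams(["a", "b", "c", "d"], 2) returns [['a', 'b'], ['b', 'c'], ['c', 'd']]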
def runFst(sentences, grammarDir, grammarFileName, fstPath):
assert(os.path.isfile(fstPath))
writeFile(grammarDir + "/grammarInput.txt", sentences)
os.chdir(grammarDir)
#os.system("copy %s %s" % (fstPath, grammarDir))
cmdStr = "%s -f %s" % (fstPath,grammarFileName)
print cmdStr
os.system(cmdStr)
def plotHistogram(list1D, nrBins=100):
import matplotlib.pyplot as plt
plt.hist(list1D, nrBins)
plt.show()
def QCSQueryLabel(qcsQueryLabelPath, modelPath, intputQueriesPath, domainString, variantString, clientIdString):
qcsQueryLabelCmdPattern = "%s -c %s --variant %s -d %s --clientId %s --input %s"
modelPath = modelPath.replace('/','\\')
intputQueriesPath = intputQueriesPath.replace('/','\\')
cmdStr = qcsQueryLabelCmdPattern % (qcsQueryLabelPath, modelPath, variantString, domainString, clientIdString, intputQueriesPath)
print cmdStr
return os.system(cmdStr)
def qasConfigExtractor(qasConfigExtractorPath, qasAllModelsDir, modelString, extractedQasModelDir):
qasConfigExtractorPath = qasConfigExtractorPath.replace('/','\\')
qasAllModelsDir = qasAllModelsDir.replace('/','\\')
extractedQasModelDir = extractedQasModelDir.replace('/','\\')
cmdStr = "%s %s %s %s" % (qasConfigExtractorPath, qasAllModelsDir, extractedQasModelDir, modelString)
print cmdStr
return os.system(cmdStr)
def isAlphabetCharacter(c):
ordinalValue = ord(c)
if (ordinalValue>=65 and ordinalValue<=90) or (ordinalValue>=97 and ordinalValue<=122):
return True
return False
def isNumberCharacter(c):
ordinalValue = ord(c)
if (ordinalValue>=48 and ordinalValue<=57):
return True
return False
#def runFstWithAddedSpaces(sentences, grammarDir, grammarFileName, fstPath):
# #since only match space+dictEntry+space, hence need to add whitespace at begin/end of sentence
# sentencesWithSpace = [" "+s+" " for s in sentences]
# runFst(sentencesWithSpace, grammarDir, grammarFileName, fstPath)
def textNormalizeQuery(query, textNormalizationLUT):
textNormalizeQuery = query[:]
for key,mappedKey in textNormalizationLUT.items():
textNormalizeQuery = textNormalizeQuery.replace(key,mappedKey)
        #this is a hack until I understand why loading from different text files
        #resulted in differently formatted strings
        #try:
        #textNormalizeQuery = textNormalizeQuery.decode('latin-1').encode('utf-8')
        #textNormalizeQuery = textNormalizeQuery.replace(key,mappedKey)
        #textNormalizeQuery = textNormalizeQuery.decode('utf-8').encode('latin-1')
        #except:
        #    pass
return textNormalizeQuery
#text normalize queries
def textNormalizeQueries(queries, textNormalizationLUT):
return [textNormalizeQuery(s, textNormalizationLUT) for s in queries]
def combineDictionaries(dictA, dictB):
    #note: this adds the items of dictB into dictA (in place) and returns it
    combined = dictA
    for key in dictB:
        combined[key] = dictB[key]
    return combined
def runConlleval(conllevalInputFile, conllevalDir, cygwinDir):
cmdString = "%s/bin/dos2unix %s" % (cygwinDir,conllevalInputFile)
os.system(cmdString)
cmdString = "%s/bin/perl.exe %s/conlleval.pl -d '\t' < %s" % (cygwinDir,conllevalDir,conllevalInputFile)
print "Executing: " + cmdString
os.system(cmdString)
def inAzure():
return os.path.isfile(r'azuremod.py')
def isString(var):
return isinstance(var, basestring)
def isList(var):
return isinstance(var, list)
def isTuple(var):
return isinstance(var, tuple)
def rotatePoint(point, angleInDegrees, centerPoint = [0,0]):
angleInDegrees = - angleInDegrees #to stay conform with how OpenCVs handles rotation which does counter-clockwise rotation
while angleInDegrees<0:
angleInDegrees += 360
theta = angleInDegrees / 180.0 * pi
ptXNew = cos(theta) * (point[0]-centerPoint[0]) - sin(theta) * (point[1]-centerPoint[1]) + centerPoint[0]
ptYNew = sin(theta) * (point[0]-centerPoint[0]) + cos(theta) * (point[1]-centerPoint[1]) + centerPoint[1]
return [ptXNew, ptYNew]
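#example (not part of the original code): the angle is negated above to match
#OpenCV's rotation convention, so e.g.
#  rotatePoint([1, 0], 90) returns approximately [0.0, -1.0]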
def avg(list1D):
return sum(list1D, 0.0) / len(list1D)
def pbMax(list1D):
maxVal = max(list1D)
indices = [i for i in range(len(list1D)) if list1D[i] == maxVal]
return maxVal,indices
#note: this does not always show the correct size
def showVars(context, maxNrLines = 10**6):
varsDictItems = context.items()
varSizes = []
for varsDictItem in varsDictItems:
obj = varsDictItem[1]
if type(obj) is list:
size = 0
for listItem in obj:
try:
size += listItem.nbytes
except:
size += sys.getsizeof(listItem)
#if varsDictItem[0] == 'feats_test':
# pdb.set_trace()
else:
size = sys.getsizeof(obj)
varSizes.append(size)
#varSizes = [sys.getsizeof(obj) for name,obj in varsDictItems]
dummy, sortOrder = listSort(varSizes, reverseSort = True)
print "{0:10} | {1:30} | {2:100}".format("SIZE", "TYPE", "NAME")
print "="*100
for index in sortOrder[:maxNrLines]:
print "{0:10} | {1:30} | {2:100}".format(varSizes[index], type(varsDictItems[index][1]), varsDictItems[index][0])
| mit |
jseabold/statsmodels | statsmodels/tsa/arima/tests/test_params.py | 5 | 22438 | import numpy as np
import pandas as pd
from numpy.testing import assert_, assert_equal, assert_allclose, assert_raises
from statsmodels.tsa.arima import specification, params
def test_init():
# Test initialization of the params
# Basic test, with 1 of each parameter
exog = pd.DataFrame([[0]], columns=['a'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(1, 1, 1), seasonal_order=(1, 1, 1, 4))
p = params.SARIMAXParams(spec=spec)
# Test things copied over from spec
assert_equal(p.spec, spec)
assert_equal(p.exog_names, ['a'])
assert_equal(p.ar_names, ['ar.L1'])
assert_equal(p.ma_names, ['ma.L1'])
assert_equal(p.seasonal_ar_names, ['ar.S.L4'])
assert_equal(p.seasonal_ma_names, ['ma.S.L4'])
assert_equal(p.param_names, ['a', 'ar.L1', 'ma.L1', 'ar.S.L4', 'ma.S.L4',
'sigma2'])
assert_equal(p.k_exog_params, 1)
assert_equal(p.k_ar_params, 1)
assert_equal(p.k_ma_params, 1)
assert_equal(p.k_seasonal_ar_params, 1)
assert_equal(p.k_seasonal_ma_params, 1)
assert_equal(p.k_params, 6)
# Initial parameters should all be NaN
assert_equal(p.params, np.nan)
assert_equal(p.ar_params, [np.nan])
assert_equal(p.ma_params, [np.nan])
assert_equal(p.seasonal_ar_params, [np.nan])
assert_equal(p.seasonal_ma_params, [np.nan])
assert_equal(p.sigma2, np.nan)
assert_equal(p.ar_poly.coef, np.r_[1, np.nan])
assert_equal(p.ma_poly.coef, np.r_[1, np.nan])
assert_equal(p.seasonal_ar_poly.coef, np.r_[1, 0, 0, 0, np.nan])
assert_equal(p.seasonal_ma_poly.coef, np.r_[1, 0, 0, 0, np.nan])
assert_equal(p.reduced_ar_poly.coef, np.r_[1, [np.nan] * 5])
assert_equal(p.reduced_ma_poly.coef, np.r_[1, [np.nan] * 5])
# Test other properties, methods
assert_(not p.is_complete)
assert_(not p.is_valid)
assert_raises(ValueError, p.__getattribute__, 'is_stationary')
assert_raises(ValueError, p.__getattribute__, 'is_invertible')
desired = {
'exog_params': [np.nan],
'ar_params': [np.nan],
'ma_params': [np.nan],
'seasonal_ar_params': [np.nan],
'seasonal_ma_params': [np.nan],
'sigma2': np.nan}
assert_equal(p.to_dict(), desired)
desired = pd.Series([np.nan] * spec.k_params, index=spec.param_names)
assert_allclose(p.to_pandas(), desired)
# Test with different numbers of parameters for each
exog = pd.DataFrame([[0, 0]], columns=['a', 'b'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(3, 1, 2), seasonal_order=(5, 1, 6, 4))
p = params.SARIMAXParams(spec=spec)
# No real need to test names here, since they are already tested above for
    # the 1-param case, and tested more extensively in the tests for
# SARIMAXSpecification
assert_equal(p.k_exog_params, 2)
assert_equal(p.k_ar_params, 3)
assert_equal(p.k_ma_params, 2)
assert_equal(p.k_seasonal_ar_params, 5)
assert_equal(p.k_seasonal_ma_params, 6)
assert_equal(p.k_params, 2 + 3 + 2 + 5 + 6 + 1)
def test_set_params_single():
# Test setting parameters directly (i.e. we test setting the AR/MA
# parameters by setting the lag polynomials elsewhere)
    # Here each type has only a single parameter
exog = pd.DataFrame([[0]], columns=['a'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(1, 1, 1), seasonal_order=(1, 1, 1, 4))
p = params.SARIMAXParams(spec=spec)
def check(is_stationary='raise', is_invertible='raise'):
assert_(not p.is_complete)
assert_(not p.is_valid)
if is_stationary == 'raise':
assert_raises(ValueError, p.__getattribute__, 'is_stationary')
else:
assert_equal(p.is_stationary, is_stationary)
if is_invertible == 'raise':
assert_raises(ValueError, p.__getattribute__, 'is_invertible')
else:
assert_equal(p.is_invertible, is_invertible)
# Set params one at a time, as scalars
p.exog_params = -6.
check()
p.ar_params = -5.
check()
p.ma_params = -4.
check()
p.seasonal_ar_params = -3.
check(is_stationary=False)
p.seasonal_ma_params = -2.
check(is_stationary=False, is_invertible=False)
p.sigma2 = -1.
# Finally, we have a complete set.
assert_(p.is_complete)
# But still not valid
assert_(not p.is_valid)
assert_equal(p.params, [-6, -5, -4, -3, -2, -1])
assert_equal(p.exog_params, [-6])
assert_equal(p.ar_params, [-5])
assert_equal(p.ma_params, [-4])
assert_equal(p.seasonal_ar_params, [-3])
assert_equal(p.seasonal_ma_params, [-2])
assert_equal(p.sigma2, -1.)
# Lag polynomials
assert_equal(p.ar_poly.coef, np.r_[1, 5])
assert_equal(p.ma_poly.coef, np.r_[1, -4])
assert_equal(p.seasonal_ar_poly.coef, np.r_[1, 0, 0, 0, 3])
assert_equal(p.seasonal_ma_poly.coef, np.r_[1, 0, 0, 0, -2])
# (1 - a L) (1 - b L^4) = (1 - a L - b L^4 + a b L^5)
assert_equal(p.reduced_ar_poly.coef, np.r_[1, 5, 0, 0, 3, 15])
# (1 + a L) (1 + b L^4) = (1 + a L + b L^4 + a b L^5)
assert_equal(p.reduced_ma_poly.coef, np.r_[1, -4, 0, 0, -2, 8])
# Override again, one at a time, now using lists
p.exog_params = [1.]
p.ar_params = [2.]
p.ma_params = [3.]
p.seasonal_ar_params = [4.]
p.seasonal_ma_params = [5.]
p.sigma2 = [6.]
p.params = [1, 2, 3, 4, 5, 6]
assert_equal(p.params, [1, 2, 3, 4, 5, 6])
assert_equal(p.exog_params, [1])
assert_equal(p.ar_params, [2])
assert_equal(p.ma_params, [3])
assert_equal(p.seasonal_ar_params, [4])
assert_equal(p.seasonal_ma_params, [5])
assert_equal(p.sigma2, 6.)
# Override again, one at a time, now using arrays
p.exog_params = np.array(6.)
p.ar_params = np.array(5.)
p.ma_params = np.array(4.)
p.seasonal_ar_params = np.array(3.)
p.seasonal_ma_params = np.array(2.)
p.sigma2 = np.array(1.)
assert_equal(p.params, [6, 5, 4, 3, 2, 1])
assert_equal(p.exog_params, [6])
assert_equal(p.ar_params, [5])
assert_equal(p.ma_params, [4])
assert_equal(p.seasonal_ar_params, [3])
assert_equal(p.seasonal_ma_params, [2])
assert_equal(p.sigma2, 1.)
# Override again, now setting params all at once
p.params = [1, 2, 3, 4, 5, 6]
assert_equal(p.params, [1, 2, 3, 4, 5, 6])
assert_equal(p.exog_params, [1])
assert_equal(p.ar_params, [2])
assert_equal(p.ma_params, [3])
assert_equal(p.seasonal_ar_params, [4])
assert_equal(p.seasonal_ma_params, [5])
assert_equal(p.sigma2, 6.)
# Lag polynomials
assert_equal(p.ar_poly.coef, np.r_[1, -2])
assert_equal(p.ma_poly.coef, np.r_[1, 3])
assert_equal(p.seasonal_ar_poly.coef, np.r_[1, 0, 0, 0, -4])
assert_equal(p.seasonal_ma_poly.coef, np.r_[1, 0, 0, 0, 5])
# (1 - a L) (1 - b L^4) = (1 - a L - b L^4 + a b L^5)
assert_equal(p.reduced_ar_poly.coef, np.r_[1, -2, 0, 0, -4, 8])
# (1 + a L) (1 + b L^4) = (1 + a L + b L^4 + a b L^5)
assert_equal(p.reduced_ma_poly.coef, np.r_[1, 3, 0, 0, 5, 15])
def test_set_params_single_nonconsecutive():
# Test setting parameters directly (i.e. we test setting the AR/MA
# parameters by setting the lag polynomials elsewhere)
    # Here each type has only a single parameter but has non-consecutive
# lag orders
exog = pd.DataFrame([[0]], columns=['a'])
spec = specification.SARIMAXSpecification(
exog=exog, order=([0, 1], 1, [0, 1]),
seasonal_order=([0, 1], 1, [0, 1], 4))
p = params.SARIMAXParams(spec=spec)
def check(is_stationary='raise', is_invertible='raise'):
assert_(not p.is_complete)
assert_(not p.is_valid)
if is_stationary == 'raise':
assert_raises(ValueError, p.__getattribute__, 'is_stationary')
else:
assert_equal(p.is_stationary, is_stationary)
if is_invertible == 'raise':
assert_raises(ValueError, p.__getattribute__, 'is_invertible')
else:
assert_equal(p.is_invertible, is_invertible)
# Set params one at a time, as scalars
p.exog_params = -6.
check()
p.ar_params = -5.
check()
p.ma_params = -4.
check()
p.seasonal_ar_params = -3.
check(is_stationary=False)
p.seasonal_ma_params = -2.
check(is_stationary=False, is_invertible=False)
p.sigma2 = -1.
# Finally, we have a complete set.
assert_(p.is_complete)
# But still not valid
assert_(not p.is_valid)
assert_equal(p.params, [-6, -5, -4, -3, -2, -1])
assert_equal(p.exog_params, [-6])
assert_equal(p.ar_params, [-5])
assert_equal(p.ma_params, [-4])
assert_equal(p.seasonal_ar_params, [-3])
assert_equal(p.seasonal_ma_params, [-2])
assert_equal(p.sigma2, -1.)
# Lag polynomials
assert_equal(p.ar_poly.coef, [1, 0, 5])
assert_equal(p.ma_poly.coef, [1, 0, -4])
assert_equal(p.seasonal_ar_poly.coef, [1, 0, 0, 0, 0, 0, 0, 0, 3])
assert_equal(p.seasonal_ma_poly.coef, [1, 0, 0, 0, 0, 0, 0, 0, -2])
# (1 - a L^2) (1 - b L^8) = (1 - a L^2 - b L^8 + a b L^10)
assert_equal(p.reduced_ar_poly.coef, [1, 0, 5, 0, 0, 0, 0, 0, 3, 0, 15])
    # (1 + a L^2) (1 + b L^8) = (1 + a L^2 + b L^8 + a b L^10)
assert_equal(p.reduced_ma_poly.coef, [1, 0, -4, 0, 0, 0, 0, 0, -2, 0, 8])
# Override again, now setting params all at once
p.params = [1, 2, 3, 4, 5, 6]
assert_equal(p.params, [1, 2, 3, 4, 5, 6])
assert_equal(p.exog_params, [1])
assert_equal(p.ar_params, [2])
assert_equal(p.ma_params, [3])
assert_equal(p.seasonal_ar_params, [4])
assert_equal(p.seasonal_ma_params, [5])
assert_equal(p.sigma2, 6.)
# Lag polynomials
assert_equal(p.ar_poly.coef, np.r_[1, 0, -2])
assert_equal(p.ma_poly.coef, np.r_[1, 0, 3])
assert_equal(p.seasonal_ar_poly.coef, [1, 0, 0, 0, 0, 0, 0, 0, -4])
assert_equal(p.seasonal_ma_poly.coef, [1, 0, 0, 0, 0, 0, 0, 0, 5])
# (1 - a L^2) (1 - b L^8) = (1 - a L^2 - b L^8 + a b L^10)
assert_equal(p.reduced_ar_poly.coef, [1, 0, -2, 0, 0, 0, 0, 0, -4, 0, 8])
    # (1 + a L^2) (1 + b L^8) = (1 + a L^2 + b L^8 + a b L^10)
assert_equal(p.reduced_ma_poly.coef, [1, 0, 3, 0, 0, 0, 0, 0, 5, 0, 15])
def test_set_params_multiple():
# Test setting parameters directly (i.e. we test setting the AR/MA
# parameters by setting the lag polynomials elsewhere)
    # Here each type has multiple parameters
exog = pd.DataFrame([[0, 0]], columns=['a', 'b'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(2, 1, 2), seasonal_order=(2, 1, 2, 4))
p = params.SARIMAXParams(spec=spec)
p.params = [-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11]
assert_equal(p.params,
[-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11])
assert_equal(p.exog_params, [-1, 2])
assert_equal(p.ar_params, [-3, 4])
assert_equal(p.ma_params, [-5, 6])
assert_equal(p.seasonal_ar_params, [-7, 8])
assert_equal(p.seasonal_ma_params, [-9, 10])
assert_equal(p.sigma2, -11)
# Lag polynomials
assert_equal(p.ar_poly.coef, np.r_[1, 3, -4])
assert_equal(p.ma_poly.coef, np.r_[1, -5, 6])
assert_equal(p.seasonal_ar_poly.coef, np.r_[1, 0, 0, 0, 7, 0, 0, 0, -8])
assert_equal(p.seasonal_ma_poly.coef, np.r_[1, 0, 0, 0, -9, 0, 0, 0, 10])
# (1 - a_1 L - a_2 L^2) (1 - b_1 L^4 - b_2 L^8) =
# (1 - b_1 L^4 - b_2 L^8) +
# (-a_1 L + a_1 b_1 L^5 + a_1 b_2 L^9) +
# (-a_2 L^2 + a_2 b_1 L^6 + a_2 b_2 L^10) =
# 1 - a_1 L - a_2 L^2 - b_1 L^4 + a_1 b_1 L^5 +
# a_2 b_1 L^6 - b_2 L^8 + a_1 b_2 L^9 + a_2 b_2 L^10
assert_equal(p.reduced_ar_poly.coef,
[1, 3, -4, 0, 7, (-3 * -7), (4 * -7), 0, -8, (-3 * 8), 4 * 8])
# (1 + a_1 L + a_2 L^2) (1 + b_1 L^4 + b_2 L^8) =
# (1 + b_1 L^4 + b_2 L^8) +
# (a_1 L + a_1 b_1 L^5 + a_1 b_2 L^9) +
# (a_2 L^2 + a_2 b_1 L^6 + a_2 b_2 L^10) =
# 1 + a_1 L + a_2 L^2 + b_1 L^4 + a_1 b_1 L^5 +
# a_2 b_1 L^6 + b_2 L^8 + a_1 b_2 L^9 + a_2 b_2 L^10
assert_equal(p.reduced_ma_poly.coef,
[1, -5, 6, 0, -9, (-5 * -9), (6 * -9),
0, 10, (-5 * 10), (6 * 10)])
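# Illustrative sketch (not part of the original test suite): the reduced AR
# polynomial coefficients asserted above are just the product of the
# non-seasonal and seasonal AR polynomials, which numpy can compute directly.
def _example_reduced_ar_polynomial_product():
    ar_poly = np.polynomial.Polynomial([1, 3, -4])  # 1 - a_1 L - a_2 L^2 with a = [-3, 4]
    seasonal_ar_poly = np.polynomial.Polynomial(
        [1, 0, 0, 0, 7, 0, 0, 0, -8])  # 1 - b_1 L^4 - b_2 L^8 with b = [-7, 8], s = 4
    reduced = ar_poly * seasonal_ar_poly
    assert_equal(reduced.coef,
                 [1, 3, -4, 0, 7, 21, -28, 0, -8, -24, 32])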
def test_set_poly_short_lags():
# Basic example (short lag orders)
exog = pd.DataFrame([[0, 0]], columns=['a', 'b'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(1, 1, 1), seasonal_order=(1, 1, 1, 4))
p = params.SARIMAXParams(spec=spec)
# Valid polynomials
p.ar_poly = [1, -0.5]
assert_equal(p.ar_params, [0.5])
p.ar_poly = np.polynomial.Polynomial([1, -0.55])
assert_equal(p.ar_params, [0.55])
p.ma_poly = [1, 0.3]
assert_equal(p.ma_params, [0.3])
p.ma_poly = np.polynomial.Polynomial([1, 0.35])
assert_equal(p.ma_params, [0.35])
p.seasonal_ar_poly = [1, 0, 0, 0, -0.2]
assert_equal(p.seasonal_ar_params, [0.2])
p.seasonal_ar_poly = np.polynomial.Polynomial([1, 0, 0, 0, -0.25])
assert_equal(p.seasonal_ar_params, [0.25])
p.seasonal_ma_poly = [1, 0, 0, 0, 0.1]
assert_equal(p.seasonal_ma_params, [0.1])
p.seasonal_ma_poly = np.polynomial.Polynomial([1, 0, 0, 0, 0.15])
assert_equal(p.seasonal_ma_params, [0.15])
# Invalid polynomials
# Must have 1 in the initial position
assert_raises(ValueError, p.__setattr__, 'ar_poly', [2, -0.5])
assert_raises(ValueError, p.__setattr__, 'ma_poly', [2, 0.3])
assert_raises(ValueError, p.__setattr__, 'seasonal_ar_poly',
[2, 0, 0, 0, -0.2])
assert_raises(ValueError, p.__setattr__, 'seasonal_ma_poly',
[2, 0, 0, 0, 0.1])
# Too short
assert_raises(ValueError, p.__setattr__, 'ar_poly', 1)
assert_raises(ValueError, p.__setattr__, 'ar_poly', [1])
assert_raises(ValueError, p.__setattr__, 'ma_poly', 1)
assert_raises(ValueError, p.__setattr__, 'ma_poly', [1])
assert_raises(ValueError, p.__setattr__, 'seasonal_ar_poly', 1)
assert_raises(ValueError, p.__setattr__, 'seasonal_ar_poly', [1])
assert_raises(ValueError, p.__setattr__, 'seasonal_ar_poly', [1, 0, 0, 0])
assert_raises(ValueError, p.__setattr__, 'seasonal_ma_poly', 1)
assert_raises(ValueError, p.__setattr__, 'seasonal_ma_poly', [1])
assert_raises(ValueError, p.__setattr__, 'seasonal_ma_poly', [1, 0, 0, 0])
# Too long
assert_raises(ValueError, p.__setattr__, 'ar_poly', [1, -0.5, 0.2])
assert_raises(ValueError, p.__setattr__, 'ma_poly', [1, 0.3, 0.2])
assert_raises(ValueError, p.__setattr__, 'seasonal_ar_poly',
[1, 0, 0, 0, 0.1, 0])
assert_raises(ValueError, p.__setattr__, 'seasonal_ma_poly',
[1, 0, 0, 0, 0.1, 0])
# Number in invalid location (only for seasonal polynomials)
assert_raises(ValueError, p.__setattr__, 'seasonal_ar_poly',
[1, 1, 0, 0, 0, -0.2])
assert_raises(ValueError, p.__setattr__, 'seasonal_ma_poly',
[1, 1, 0, 0, 0, 0.1])
def test_set_poly_short_lags_nonconsecutive():
# Short but non-consecutive lag orders
exog = pd.DataFrame([[0, 0]], columns=['a', 'b'])
spec = specification.SARIMAXSpecification(
exog=exog, order=([0, 1], 1, [0, 1]),
seasonal_order=([0, 1], 1, [0, 1], 4))
p = params.SARIMAXParams(spec=spec)
# Valid polynomials
p.ar_poly = [1, 0, -0.5]
assert_equal(p.ar_params, [0.5])
p.ar_poly = np.polynomial.Polynomial([1, 0, -0.55])
assert_equal(p.ar_params, [0.55])
p.ma_poly = [1, 0, 0.3]
assert_equal(p.ma_params, [0.3])
p.ma_poly = np.polynomial.Polynomial([1, 0, 0.35])
assert_equal(p.ma_params, [0.35])
p.seasonal_ar_poly = [1, 0, 0, 0, 0, 0, 0, 0, -0.2]
assert_equal(p.seasonal_ar_params, [0.2])
p.seasonal_ar_poly = (
np.polynomial.Polynomial([1, 0, 0, 0, 0, 0, 0, 0, -0.25]))
assert_equal(p.seasonal_ar_params, [0.25])
p.seasonal_ma_poly = [1, 0, 0, 0, 0, 0, 0, 0, 0.1]
assert_equal(p.seasonal_ma_params, [0.1])
p.seasonal_ma_poly = (
np.polynomial.Polynomial([1, 0, 0, 0, 0, 0, 0, 0, 0.15]))
assert_equal(p.seasonal_ma_params, [0.15])
# Invalid polynomials
# Number in invalid (i.e. an excluded lag) location
# (now also for non-seasonal polynomials)
assert_raises(ValueError, p.__setattr__, 'ar_poly', [1, 1, -0.5])
assert_raises(ValueError, p.__setattr__, 'ma_poly', [1, 1, 0.3])
assert_raises(ValueError, p.__setattr__, 'seasonal_ar_poly',
[1, 0, 0, 0, 1., 0, 0, 0, -0.2])
assert_raises(ValueError, p.__setattr__, 'seasonal_ma_poly',
[1, 0, 0, 0, 1., 0, 0, 0, 0.1])
def test_set_poly_longer_lags():
# Test with higher order polynomials
exog = pd.DataFrame([[0, 0]], columns=['a', 'b'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(2, 1, 2), seasonal_order=(2, 1, 2, 4))
p = params.SARIMAXParams(spec=spec)
# Setup the non-AR/MA values
p.exog_params = [-1, 2]
p.sigma2 = -11
# Lag polynomials
p.ar_poly = np.r_[1, 3, -4]
p.ma_poly = np.r_[1, -5, 6]
p.seasonal_ar_poly = np.r_[1, 0, 0, 0, 7, 0, 0, 0, -8]
p.seasonal_ma_poly = np.r_[1, 0, 0, 0, -9, 0, 0, 0, 10]
# Test that parameters were set correctly
assert_equal(p.params,
[-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11])
assert_equal(p.exog_params, [-1, 2])
assert_equal(p.ar_params, [-3, 4])
assert_equal(p.ma_params, [-5, 6])
assert_equal(p.seasonal_ar_params, [-7, 8])
assert_equal(p.seasonal_ma_params, [-9, 10])
assert_equal(p.sigma2, -11)
def test_is_stationary():
# Tests for the `is_stationary` property
spec = specification.SARIMAXSpecification(
order=(1, 1, 1), seasonal_order=(1, 1, 1, 4))
p = params.SARIMAXParams(spec=spec)
# Test stationarity
assert_raises(ValueError, p.__getattribute__, 'is_stationary')
p.ar_params = [0.5]
p.seasonal_ar_params = [0]
assert_(p.is_stationary)
p.ar_params = [1.0]
assert_(not p.is_stationary)
p.ar_params = [0]
p.seasonal_ar_params = [0.5]
assert_(p.is_stationary)
p.seasonal_ar_params = [1.0]
assert_(not p.is_stationary)
p.ar_params = [0.2]
p.seasonal_ar_params = [0.2]
assert_(p.is_stationary)
p.ar_params = [0.99]
p.seasonal_ar_params = [0.99]
assert_(p.is_stationary)
p.ar_params = [1.]
p.seasonal_ar_params = [1.]
assert_(not p.is_stationary)
def test_is_invertible():
# Tests for the `is_invertible` property
spec = specification.SARIMAXSpecification(
order=(1, 1, 1), seasonal_order=(1, 1, 1, 4))
p = params.SARIMAXParams(spec=spec)
# Test invertibility
assert_raises(ValueError, p.__getattribute__, 'is_invertible')
p.ma_params = [0.5]
p.seasonal_ma_params = [0]
assert_(p.is_invertible)
p.ma_params = [1.0]
assert_(not p.is_invertible)
p.ma_params = [0]
p.seasonal_ma_params = [0.5]
assert_(p.is_invertible)
p.seasonal_ma_params = [1.0]
assert_(not p.is_invertible)
p.ma_params = [0.2]
p.seasonal_ma_params = [0.2]
assert_(p.is_invertible)
p.ma_params = [0.99]
p.seasonal_ma_params = [0.99]
assert_(p.is_invertible)
p.ma_params = [1.]
p.seasonal_ma_params = [1.]
assert_(not p.is_invertible)
def test_is_valid():
# Additional tests for the `is_valid` property (tests for NaN checks were
# already done in `test_set_params_single`).
spec = specification.SARIMAXSpecification(
order=(1, 1, 1), seasonal_order=(1, 1, 1, 4),
enforce_stationarity=True, enforce_invertibility=True)
p = params.SARIMAXParams(spec=spec)
# Doesn't start out as valid
assert_(not p.is_valid)
# Given stationary / invertible values, it is valid
p.params = [0.5, 0.5, 0.5, 0.5, 1.]
assert_(p.is_valid)
# With either non-stationary or non-invertible values, not valid
p.params = [1., 0.5, 0.5, 0.5, 1.]
assert_(not p.is_valid)
p.params = [0.5, 1., 0.5, 0.5, 1.]
assert_(not p.is_valid)
p.params = [0.5, 0.5, 1., 0.5, 1.]
assert_(not p.is_valid)
p.params = [0.5, 0.5, 0.5, 1., 1.]
assert_(not p.is_valid)
def test_repr_str():
exog = pd.DataFrame([[0, 0]], columns=['a', 'b'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(1, 1, 1), seasonal_order=(1, 1, 1, 4))
p = params.SARIMAXParams(spec=spec)
# Check when we haven't given any parameters
assert_equal(repr(p), 'SARIMAXParams(exog=[nan nan], ar=[nan], ma=[nan],'
' seasonal_ar=[nan], seasonal_ma=[nan], sigma2=nan)')
# assert_equal(str(p), '[nan nan nan nan nan nan nan]')
p.exog_params = [1, 2]
assert_equal(repr(p), 'SARIMAXParams(exog=[1. 2.], ar=[nan], ma=[nan],'
' seasonal_ar=[nan], seasonal_ma=[nan], sigma2=nan)')
# assert_equal(str(p), '[ 1. 2. nan nan nan nan nan]')
p.ar_params = [0.5]
assert_equal(repr(p), 'SARIMAXParams(exog=[1. 2.], ar=[0.5], ma=[nan],'
' seasonal_ar=[nan], seasonal_ma=[nan], sigma2=nan)')
# assert_equal(str(p), '[1. 2. 0.5 nan nan nan nan]')
p.ma_params = [0.2]
assert_equal(repr(p), 'SARIMAXParams(exog=[1. 2.], ar=[0.5], ma=[0.2],'
' seasonal_ar=[nan], seasonal_ma=[nan], sigma2=nan)')
# assert_equal(str(p), '[1. 2. 0.5 0.2 nan nan nan]')
p.seasonal_ar_params = [0.001]
assert_equal(repr(p), 'SARIMAXParams(exog=[1. 2.], ar=[0.5], ma=[0.2],'
' seasonal_ar=[0.001], seasonal_ma=[nan],'
' sigma2=nan)')
# assert_equal(str(p),
# '[1.e+00 2.e+00 5.e-01 2.e-01 1.e-03 nan nan]')
p.seasonal_ma_params = [-0.001]
assert_equal(repr(p), 'SARIMAXParams(exog=[1. 2.], ar=[0.5], ma=[0.2],'
' seasonal_ar=[0.001], seasonal_ma=[-0.001],'
' sigma2=nan)')
# assert_equal(str(p), '[ 1.e+00 2.e+00 5.e-01 2.e-01 1.e-03'
# ' -1.e-03 nan]')
p.sigma2 = 10.123
assert_equal(repr(p), 'SARIMAXParams(exog=[1. 2.], ar=[0.5], ma=[0.2],'
' seasonal_ar=[0.001], seasonal_ma=[-0.001],'
' sigma2=10.123)')
# assert_equal(str(p), '[ 1.0000e+00 2.0000e+00 5.0000e-01 2.0000e-01'
# ' 1.0000e-03 -1.0000e-03\n 1.0123e+01]')
| bsd-3-clause |
zorojean/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2D data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
lbishal/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that partial_fit raises an error when n_components is changed via
    # set_params, and works again once the original value is restored.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features between calls raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
dusenberrymw/incubator-systemml | src/main/python/systemml/classloader.py | 4 | 7952 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = ['createJavaObject', 'jvm_stdout', 'default_jvm_stdout', 'default_jvm_stdout_parallel_flush', 'set_default_jvm_stdout', 'get_spark_context' ]
import os
import numpy as np
import pandas as pd
import threading, time
try:
import py4j.java_gateway
from py4j.java_gateway import JavaObject
from pyspark import SparkContext
from pyspark.sql import SparkSession
except ImportError:
raise ImportError('Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')
_loadedSystemML = False
def get_spark_context():
"""
    Internal method to get the already initialized SparkContext. Developers should always use
    get_spark_context() instead of SparkContext._active_spark_context to ensure that SystemML is loaded.
Returns
-------
sc: SparkContext
SparkContext
"""
if SparkContext._active_spark_context is not None:
sc = SparkContext._active_spark_context
global _loadedSystemML
if not _loadedSystemML:
createJavaObject(sc, 'dummy')
_loadedSystemML = True
return sc
else:
raise Exception('Expected spark context to be created.')
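# Illustrative usage (not part of the original module; assumes pyspark has
# already created an active SparkContext, e.g. inside a pyspark shell):
#   sc = get_spark_context()   # returns the active SparkContext and loads SystemML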
_in_jvm_stdout = False
default_jvm_stdout = True
default_jvm_stdout_parallel_flush = True
def set_default_jvm_stdout(enable, parallel_flush=True):
"""
    This is a useful utility method to configure whether the output of the driver JVM is flushed by default from within a Jupyter notebook
Parameters
----------
enable: boolean
Should flush the stdout by default when mlcontext.execute is invoked
parallel_flush: boolean
Should flush the stdout in parallel
"""
global default_jvm_stdout, default_jvm_stdout_parallel_flush
default_jvm_stdout = enable
default_jvm_stdout_parallel_flush = parallel_flush
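# Illustrative call (not part of the original module): flush the driver JVM
# stdout by default whenever MLContext execution is invoked:
#   set_default_jvm_stdout(True, parallel_flush=True)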
# This is a useful utility class to get the output of the driver JVM from within a Jupyter notebook
# Example usage:
# with jvm_stdout():
# ml.execute(script)
class jvm_stdout(object):
"""
    This is a useful utility class to get the output of the driver JVM from within a Jupyter notebook
Parameters
----------
parallel_flush: boolean
Should flush the stdout in parallel
"""
def __init__(self, parallel_flush=False):
self.util = get_spark_context()._jvm.org.apache.sysml.api.ml.Utils()
self.parallel_flush = parallel_flush
self.t = threading.Thread(target=self.flush_stdout)
self.stop = False
def flush_stdout(self):
while not self.stop:
time.sleep(1) # flush stdout every 1 second
str = self.util.flushStdOut()
if str != '':
str = str[:-1] if str.endswith('\n') else str
print(str)
def __enter__(self):
global _in_jvm_stdout
if _in_jvm_stdout:
# Allow for nested jvm_stdout
self.donotRedirect = True
else:
self.donotRedirect = False
self.util.startRedirectStdOut()
if self.parallel_flush:
self.t.start()
_in_jvm_stdout = True
def __exit__(self, *args):
global _in_jvm_stdout
if not self.donotRedirect:
if self.parallel_flush:
self.stop = True
self.t.join()
print(self.util.stopRedirectStdOut())
_in_jvm_stdout = False
_initializedSparkSession = False
def _createJavaObject(sc, obj_type):
# -----------------------------------------------------------------------------------
# Avoids race condition between locking of metastore_db of Scala SparkSession and PySpark SparkSession.
# This is done at toDF() rather than import level to avoid creation of SparkSession in worker processes.
global _initializedSparkSession
if not _initializedSparkSession:
_initializedSparkSession = True
SparkSession.builder.getOrCreate().createDataFrame(pd.DataFrame(np.array([[1,2],[3,4]])))
# -----------------------------------------------------------------------------------
if obj_type == 'mlcontext':
return sc._jvm.org.apache.sysml.api.mlcontext.MLContext(sc._jsc)
elif obj_type == 'dummy':
return sc._jvm.org.apache.sysml.utils.SystemMLLoaderUtils()
else:
raise ValueError('Incorrect usage: supported values: mlcontext or dummy')
def _getJarFileNames(sc):
import imp, fnmatch
jar_file_name = '_ignore.jar'
java_dir = os.path.join(imp.find_module("systemml")[1], "systemml-java")
jar_file_names = []
for file in os.listdir(java_dir):
if fnmatch.fnmatch(file, 'systemml-*-SNAPSHOT.jar') or fnmatch.fnmatch(file, 'systemml-*.jar'):
jar_file_names = jar_file_names + [ os.path.join(java_dir, file) ]
return jar_file_names
def _getLoaderInstance(sc, jar_file_name, className, hint):
err_msg = 'Unable to load systemml-*.jar into current pyspark session.'
if os.path.isfile(jar_file_name):
sc._jsc.addJar(jar_file_name)
jar_file_url = sc._jvm.java.io.File(jar_file_name).toURI().toURL()
url_class = sc._jvm.java.net.URL
jar_file_url_arr = sc._gateway.new_array(url_class, 1)
jar_file_url_arr[0] = jar_file_url
url_class_loader = sc._jvm.java.net.URLClassLoader(jar_file_url_arr, sc._jsc.getClass().getClassLoader())
c1 = sc._jvm.java.lang.Class.forName(className, True, url_class_loader)
return c1.newInstance()
else:
raise ImportError(err_msg + ' Hint: Download the jar from http://systemml.apache.org/download and ' + hint )
def createJavaObject(sc, obj_type):
"""
    Performs the appropriate check that SystemML.jar is available and returns the handle to the MLContext object on the JVM
Parameters
----------
sc: SparkContext
SparkContext
obj_type: Type of object to create ('mlcontext' or 'dummy')
"""
try:
return _createJavaObject(sc, obj_type)
except (py4j.protocol.Py4JError, TypeError):
ret = None
err_msg = 'Unable to load systemml-*.jar into current pyspark session.'
hint = 'Provide the following argument to pyspark: --driver-class-path '
jar_file_names = _getJarFileNames(sc)
if len(jar_file_names) != 2:
raise ImportError('Expected only systemml and systemml-extra jars, but found ' + str(jar_file_names))
for jar_file_name in jar_file_names:
if 'extra' in jar_file_name:
x = _getLoaderInstance(sc, jar_file_name, 'org.apache.sysml.api.dl.Caffe2DMLLoader', hint + 'systemml-*-extra.jar')
x.loadCaffe2DML(jar_file_name)
else:
x = _getLoaderInstance(sc, jar_file_name, 'org.apache.sysml.utils.SystemMLLoaderUtils', hint + 'systemml-*.jar')
x.loadSystemML(jar_file_name)
try:
ret = _createJavaObject(sc, obj_type)
except (py4j.protocol.Py4JError, TypeError):
raise ImportError(err_msg + ' Hint: ' + hint + jar_file_name)
return ret
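# Minimal sketch of how this module is typically exercised (hypothetical
# variable names; assumes SystemML.jar is available to the driver):
#   sc = get_spark_context()
#   ml_context = createJavaObject(sc, 'mlcontext')  # JVM handle to MLContext
#   with jvm_stdout():
#       pass  # e.g. ml.execute(script), as in the usage comment above jvm_stdout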
| apache-2.0 |
michaelaye/pyuvis | pyuvis/hsp_sensitivity.py | 1 | 1873 | import pandas as pd
from io import StringIO
data = """1130.00 0.00000
1140.00 0.0270549
1150.00 0.0390556
1160.00 0.0511593
1170.00 0.0560159
1180.00 0.0645439
1190.00 0.0705074
1200.00 0.0778497
1210.00 0.0810836
1220.00 0.0915620
1230.00 0.0993767
1240.00 0.102783
1250.00 0.110385
1260.00 0.114058
1270.00 0.118549
1280.00 0.123220
1290.00 0.123048
1300.00 0.121375
1310.00 0.129776
1320.00 0.133781
1330.00 0.140251
1340.00 0.145286
1350.00 0.143884
1360.00 0.147813
1370.00 0.146146
1380.00 0.144396
1390.00 0.150510
1400.00 0.151467
1410.00 0.149956
1420.00 0.145171
1430.00 0.143898
1440.00 0.136842
1450.00 0.144043
1460.00 0.145993
1470.00 0.149858
1480.00 0.149426
1490.00 0.148534
1500.00 0.148939
1510.00 0.149116
1520.00 0.147436
1530.00 0.146430
1540.00 0.152721
1550.00 0.147942
1560.00 0.133399
1570.00 0.132017
1580.00 0.127328
1590.00 0.112406
1600.00 0.118397
1610.00 0.108828
1620.00 0.130915
1630.00 0.122729
1640.00 0.136309
1650.00 0.130725
1660.00 0.131107
1670.00 0.126259
1680.00 0.119278
1690.00 0.109329
1700.00 0.103477
1710.00 0.0957879
1720.00 0.0880108
1730.00 0.0806759
1740.00 0.0761473
1750.00 0.0680325
1760.00 0.0620168
1770.00 0.0560484
1780.00 0.0500040
1790.00 0.0450547
1800.00 0.0403837
"""
s = StringIO(data)
sens_df = pd.read_fwf(s, names=['wavelength', 'sensitivity'], index_col=0)
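# Illustrative usage (not part of the original module): sens_df is indexed by
# the wavelength column, so a single sensitivity value can be looked up with
#   sens_df.loc[1400.0, 'sensitivity']   # -> 0.151467, taken from the table above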
| bsd-2-clause |
lin-credible/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
    Provides an alternative to svd(X'Y); returns the first left and right
    singular vectors of X'Y. See PLS for the meaning of the parameters. It is
    similar to the power method for determining the leading eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
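# Illustrative check (not part of the original module): with norm_y_weights=True
# the weights returned by the inner loop should match the first singular vectors
# of X'Y computed by _svd_cross_product below, up to sign, e.g.:
#   u_nip, v_nip, _ = _nipals_twoblocks_inner_loop(X, Y, norm_y_weights=True)
#   u_svd, v_svd = _svd_cross_product(X, Y)
#   np.allclose(np.abs(u_nip), np.abs(u_svd))  # expected True at convergence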
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
    """Center X and Y, and scale them if the scale parameter is True.
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
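# Illustrative check (not part of the original module): after
#   Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(X.copy(), Y.copy())
# each column of Xc and Yc has zero mean and, since scale defaults to True,
# unit standard deviation (computed with ddof=1); constant columns are left
# unscaled because their std is replaced by 1.0 above.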
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
    This class implements the generic PLS algorithm; the constructor's
    parameters allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
        (i) The outer loop iterates over components.
        (ii) The inner loop estimates the weights vectors. This can be done
        with two algorithms: (a) the inner loop of the original NIPALS
        algorithm, or (b) an SVD on the residual cross-covariance matrices.
    Parameters
    ----------
    n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of predictors.
        Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
        # copy since this will contain the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # A possible memory footprint reduction may be done here: in order
            # to avoid the allocation of a data chunk for the rank-one
            # approximation matrix which is then subtracted from Xk, we suggest
            # to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Target vectors, where n_samples is the number of samples and
            q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Target vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as the following three PLS
    packages in the R language (R-project):
    - "mixOmics" with function pls(X, Y, mode = "regression")
    - "plspm" with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
    algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical symmetric version of the PLS
    regression, which is slightly different from CCA. This is mostly used
    for modeling.
    This implementation provides the same results as the "plspm" package
    in the R language (R-project), using the function plsca(X, Y).
    Results are equal or collinear with the function
    ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
    the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply perform an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
        # copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
        # The arpack svds solver only works if the number of extracted
        # components is smaller than rank(X) - 1. Hence, if we want to extract
        # all the components (C.shape[1]), we have to use another solver.
        # Otherwise, let's use arpack to compute only the requested components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Target vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
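# Minimal usage sketch (not part of the original module), reusing the data from
# the PLSRegression/PLSCanonical docstring examples above:
#   import numpy as np
#   from sklearn.cross_decomposition import PLSSVD
#   X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
#   Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
#   x_scores, y_scores = PLSSVD(n_components=2).fit(X, Y).transform(X, Y)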
| bsd-3-clause |
robbymeals/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 142 | 6276 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
peterwilletts24/Python-Scripts | era_interim/era_Water_Balance_Mean.py | 1 | 6659 | import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib.colors import from_levels_and_colors
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from netCDF4 import Dataset
#nc = Dataset('/nfs/a90/eepdw/Data/ERA_Iinterim_Heat_Rad_Fluxes/era_interim_netcdf_heat_rad_flux_evap_precip_6hr_timestep.nc')
nc12 = Dataset('/nfs/a90/eepdw/Data/ERA_Iinterim_Heat_Rad_Fluxes/era_interim_netcdf_heat_rad_flux_evap_precip_00_timestep.nc')
time_min=datetime.datetime(2011,8,18,0,0,0,0)
time_max=datetime.datetime(2011,9,8,0,0,0,0)
min_contour = -15
max_contour = 10
tick_interval=5
lon_max = 116
lon_min = 34
lat_max= 40.
lat_min=-11.25
lon_high_plot = 102
lon_low_plot = 64
lat_high_plot= 30.
lat_low_plot=-10
divisor=10 # for lat/lon rounding
#latent_mean = np.load('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_latent_mean.npy')
# sensible_mean = np.load('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_sensible_mean.npy')
# swave_mean = np.load('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_swave_mean.npy')
# lwave_mean = np.load('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_lwave_mean.npy')
# total_mean = np.load('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_total_mean.npy')
lat = np.load('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_lats.npy')
lon = np.load('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_longs.npy')
lons,lats = np.meshgrid(lon, lat)
hours_since=datetime.datetime(1900,1,1,0,0,0,0)
# Get min and max index positions for latitude and longitude
la_index = np.where((nc12.variables['latitude'][:]<=lat_max) & (nc12.variables['latitude'][:] >= lat_min))
lo_index = np.where((nc12.variables['longitude'][:]<=lon_max) & (nc12.variables['longitude'][:] >= lon_min))
la_i_max = np.max(la_index)
la_i_min = np.min(la_index)
lo_i_max = np.max(lo_index)
lo_i_min = np.min(lo_index)
lat_amounts=la_i_max-la_i_min
lon_amounts=lo_i_max-lo_i_min
print(nc12)
# Load evaporation and precipitation accumulations (in metres)
# datetimes = np.array([datetime.timedelta(hours=float(i))+hours_since for i in nc.variables['time'][:]])
# time_index= np.where((datetimes<=time_max) & (datetimes >= time_min))
# t_i_max = np.max(time_index)
# t_i_min = np.min(time_index)
#evap_in = nc.variables['e'][t_i_min:t_i_max+1,la_i_min:la_i_max+1, lo_i_min:lo_i_max+1]
#precip_in = nc.variables['tp'][t_i_min:t_i_max+1,la_i_min:la_i_max+1, lo_i_min:lo_i_max+1]
datetimes = np.array([datetime.timedelta(hours=float(i))+hours_since for i in nc12.variables['time'][:]])
time_index= np.where((datetimes<=time_max) & (datetimes >= time_min))
t_i_max = np.max(time_index)
t_i_min = np.min(time_index)
precip_in = nc12.variables['tp'][t_i_min:t_i_max+1,la_i_min:la_i_max+1, lo_i_min:lo_i_max+1]
#evap_in = nc12.variables['e'][t_i_min:t_i_max+1,la_i_min:la_i_max+1, lo_i_min:lo_i_max+1]
# Evaporation and precipitation fields from ERA-Interim are 12-hourly accumulations in metres,
# while E-P for the EMBRACE models has been calculated in kg/m^2/day (kg/m^2/day = mm/day).
# The conversion is therefore (m * 1000) * (24/12) = kg/m^2/day, based on how much accumulates
# in each 12-hour window, from which the mean is then calculated.
# By looking at the data, it appears that precipitation accumulates over 12 hours
# (the ECMWF link to the documentation is currently broken).
#evap_mean = np.mean((evap_in * 1000)*(24/12), axis=0, dtype=np.float64)
precip_mean = np.mean((precip_in * 1000)*(24/12), axis=0, dtype=np.float64)
# Evaporation same method as in EMBRACE calculation
latent_mean = np.load('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_latent_mean.npy')
latent_heat_of_vapourisation = 2.5E06
convert_to_kg_m2_day = 86400
evap_rate = (latent_mean/latent_heat_of_vapourisation)*86400
# rain_daily=pcuberain*convert_to_kg_m2_day
waterbalance = evap_rate - precip_mean
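# Worked unit check (illustrative, not in the original script): a 12-hour
# precipitation accumulation of 0.006 m converts to (0.006 * 1000) * (24/12)
# = 12 kg m^-2 day^-1, and a mean latent heat flux of 100 W m^-2 gives
# (100 / 2.5e6) * 86400 ~= 3.5 kg m^-2 day^-1 of evaporation.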
m =\
Basemap(llcrnrlon=lon_low_plot,llcrnrlat=lat_low_plot,urcrnrlon=lon_high_plot,urcrnrlat=lat_high_plot,projection='mill', rsphere=6371229)
x, y = m(lons, lats)
fig=plt.figure(figsize=(8,8))
ax = fig.add_axes([0.05,0.05,0.9,0.85])
cmap=plt.cm.RdBu_r
clevs = np.linspace(min_contour, max_contour,256)
midpoint=0
midp = np.mean(np.c_[clevs[:-1], clevs[1:]], axis=1)
vals = np.interp(midp, [min_contour, midpoint, max_contour], [0, 0.5, 1])
cols = plt.cm.RdBu_r(vals)
clevs_extend = np.linspace(min_contour, max_contour,254)
cmap, norm = from_levels_and_colors(clevs_extend, cols, extend='both')
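# The interpolation above pins the RdBu_r midpoint (white) to zero: values in
# [min_contour, 0] take colours from the blue half and values in [0, max_contour]
# from the red half, even though the contour range (-15 to 10) is asymmetric.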
# draw coastlines, state and country boundaries, edge of map.
m.drawcoastlines(linewidth=0.5,color='#262626')
#m.drawstates()
m.drawcountries(linewidth=0.5,color='#262626')
# draw parallels.
parallels = np.arange(0.,90,divisor)
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10, color='#262626' )
# draw meridians
meridians = np.arange(0.,360., divisor)
m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10, color='#262626')
cs_col = m.contourf(x,y, waterbalance, clevs, cmap=cmap, norm=norm, extend='both')
cbar = m.colorbar(cs_col,location='bottom',pad="5%")
#cbar.ax.tick_params(labelsize=12, colors='#262626')
ticks= np.arange(int(min_contour),int(max_contour)+tick_interval,tick_interval)
cbar.set_ticks(ticks, update_ticks=True)
cbar.set_ticklabels(([r"${%s}$" % x for x in ticks]))
cbar.set_label('$kgm^{-2}day^{-1}$', fontsize=12, color='#262626')
#plt.show()
plt.savefig('/nfs/a90/eepdw/Figures/ERA_Interim/Era_Interim_EMBRACE_period_evap_minus_precip_mean_notitle.png', format='png', bbox_inches='tight')
plt.suptitle('ERA-Interim Reanalysis Mean Evaporation - Precipitation for EMBRACE period', fontsize=16, color='#262626')
plt.savefig('/nfs/a90/eepdw/Figures/ERA_Interim/Era_Interim_EMBRACE_period_evap_minus_precip_mean.png', format='png', bbox_inches='tight')
| mit |
trustedanalytics/spark-tk | regression-tests/sparktkregtests/testcases/graph/closeness_centrality_test.py | 6 | 5663 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests closeness centrality algorithm for graphs"""
import unittest
from sparktkregtests.lib import sparktk_test
class ClosenessCentrality(sparktk_test.SparkTKTestCase):
def setUp(self):
edges = self.context.frame.create(
[(0, 1, 1),
(0, 2, 1),
(2, 3, 2),
(2, 4, 4),
(3, 4, 2),
(3, 5, 4),
(4, 5, 2),
(4, 6, 1)],
["src", "dst", "weights"])
vertices = self.context.frame.create(
[[0], [1], [2], [3], [4], [5], [6]], ["id"])
self.graph = self.context.graph.create(vertices, edges)
def test_default(self):
"""Test default settings"""
result_frame = self.graph.closeness_centrality(normalize=False)
result = result_frame.to_pandas()
#validate centrality values
expected_values = {0 : 0.5,
1: 0.0,
2: 0.667,
3: 0.75,
4: 1.0,
5: 0.0,
6: 0.0}
self._validate_result(result, expected_values)
def test_weights_single_shortest_path(self):
"""Tests weighted closeness when only one shortest path present"""
edges = self.context.frame.create(
[(0,1,3), (0, 2, 2),
(0, 3, 6), (0, 4, 4),
(1, 3, 5), (1, 5, 5),
(2, 4, 1), (3, 4, 2),
(3, 5, 1), (4, 5, 4)],
["src", "dst", "weights"])
vertices = self.context.frame.create([[0], [1], [2], [3], [4], [5]], ["id"])
graph = self.context.graph.create(vertices, edges)
#validate centrality values
result_frame = graph.closeness_centrality("weights", False)
result = result_frame.to_pandas()
expected_values = {0 : 0.238,
1: 0.176,
2: 0.333,
3: 0.667,
4: 0.25,
5: 0.0}
self._validate_result(result, expected_values)
def test_weights_multiple_shortest_paths(self):
"""Test centrality when multiple shortest paths exist"""
result_frame = self.graph.closeness_centrality("weights", False)
#validate centrality values
expected_values = {0 : 0.261,
1: 0.0,
2: 0.235,
3: 0.333,
4: 0.667,
5: 0.0,
6: 0.0}
result = result_frame.to_pandas()
self._validate_result(result, expected_values)
def test_disconnected_edges(self):
"""Test closeness on graph with disconnected edges"""
edges = self.context.frame.create(
[['a', 'b'], ['a', 'c'],
['c', 'd'], ['c', 'e'],
['f', 'g'], ['g', 'h']],
['src', 'dst'])
vertices = self.context.frame.create(
[['a'], ['b'], ['c'], ['d'], ['e'], ['f'], ['g'], ['h']],
['id'])
graph = self.context.graph.create(vertices, edges)
result_frame = graph.closeness_centrality(normalize=False)
#validate centrality values
expected_values = {'a': 0.667,
'b': 0.0, 'c': 1.0, 'd': 0.0,
'e': 0.0, 'f': 0.667, 'g': 1.0, 'h':0.0}
result = result_frame.to_pandas()
self._validate_result(result, expected_values)
def test_normalize(self):
"""Test normalized centrality"""
result_frame = self.graph.closeness_centrality(normalize=True)
result = result_frame.to_pandas()
#validate centrality values
expected_values = {0 : 0.5,
1: 0.0,
2: 0.444,
3: 0.375,
4: 0.333,
5: 0.0,
6: 0.0}
self._validate_result(result, expected_values)
def test_negative_edges(self):
"""Test closeness on graph with disconnected edges"""
edges = self.context.frame.create(
[['a', 'b', 10], ['a', 'c', 12],
['c', 'd', -1], ['c', 'e', 5]],
['src', 'dst', 'weight'])
vertices = self.context.frame.create(
[['a'], ['b'], ['c'], ['d'], ['e']],
['id'])
graph = self.context.graph.create(vertices, edges)
with self.assertRaisesRegexp(
Exception, "edge weight cannot be negative"):
graph.closeness_centrality(
edge_weight='weight',
normalize=False)
def test_bad_weights_column_name(self):
"""Should throw exception when bad weights column name given"""
with self.assertRaisesRegexp(
Exception, "Field \"BAD\" does not exist"):
self.graph.closeness_centrality("BAD")
def _validate_result(self, result, expected_values):
for i, row in result.iterrows():
id = row['id']
self.assertAlmostEqual(
row["closeness_centrality"],
expected_values[id],
delta = 0.1)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
giorgiop/scikit-learn | sklearn/ensemble/partial_dependence.py | 25 | 15121 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
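Examples
--------
A rough illustration, assuming the behaviour described above (each column
here has only 3 unique values, fewer than ``grid_resolution``, so the axes
are simply those unique values and the grid is their cartesian product):
>>> import numpy as np
>>> X = np.array([[0., 10.], [1., 20.], [2., 30.]])
>>> grid, axes = _grid_from_X(X, grid_resolution=5)  # doctest: +SKIP
>>> grid.shape  # doctest: +SKIP
(9, 2)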
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution, use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the ``grid``. Only used if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
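    # Accumulate the partial dependence over the whole ensemble: every tree
    # of every boosting stage adds its contribution on the grid points into
    # pdp[k] (one slice per class), with gbrt.learning_rate passed through
    # so each stage is weighted as in the fitted model.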
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
hehongliang/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 28 | 12795 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import sys
import argparse
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
# Give a folder path as an argument with '--log_dir' to save
# TensorBoard summaries. Default is a log folder in current directory.
current_path = os.path.dirname(os.path.realpath(sys.argv[0]))
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join(current_path, 'log'),
help='The log directory for TensorBoard summaries.')
FLAGS, unparsed = parser.parse_known_args()
# Create the directory for TensorBoard variables if there is not.
if not os.path.exists(FLAGS.log_dir):
os.makedirs(FLAGS.log_dir)
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - list of [word, count] pairs for the most common words ('UNK' first)
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(
vocabulary, vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span) # pylint: disable=redefined-builtin
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0],
reverse_dictionary[labels[i, 0]])
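# With num_skips=2 and skip_window=1, every center word in the sliding
# window is paired with both of its immediate neighbours, so the 8-element
# demo batch above contains 4 distinct center words, each repeated twice
# with a different context label (shapes: batch (8,), labels (8, 1)).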
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy; they don't affect the calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
with tf.name_scope('inputs'):
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
with tf.name_scope('embeddings'):
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
with tf.name_scope('weights'):
nce_weights = tf.Variable(
tf.truncated_normal(
[vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
with tf.name_scope('biases'):
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
with tf.name_scope('loss'):
loss = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
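    # Shape bookkeeping for the NCE loss above: embed is
    # (batch_size, embedding_size), nce_weights is
    # (vocabulary_size, embedding_size) and train_labels is (batch_size, 1);
    # each example is scored against its true class plus num_sampled
    # randomly drawn negative classes, and the per-example losses are
    # averaged over the batch by tf.reduce_mean.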
# Add the loss value as a scalar to summary.
tf.summary.scalar('loss', loss)
# Construct the SGD optimizer using a learning rate of 1.0.
with tf.name_scope('optimizer'):
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Merge all summaries.
merged = tf.summary.merge_all()
# Add variable initializer.
init = tf.global_variables_initializer()
# Create a saver.
saver = tf.train.Saver()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# Open a writer to write summaries.
writer = tf.summary.FileWriter(FLAGS.log_dir, session.graph)
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# Define metadata variable.
run_metadata = tf.RunMetadata()
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()).
# Also, evaluate the merged op to get all summaries from the returned "summary" variable.
# Feed metadata variable to session for visualizing the graph in TensorBoard.
_, summary, loss_val = session.run(
[optimizer, merged, loss],
feed_dict=feed_dict,
run_metadata=run_metadata)
average_loss += loss_val
# Add returned summaries to writer in each step.
writer.add_summary(summary, step)
# Add metadata to visualize the graph for the last run.
if step == (num_steps - 1):
writer.add_run_metadata(run_metadata, 'step%d' % step)
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Write corresponding labels for the embeddings.
with open(FLAGS.log_dir + '/metadata.tsv', 'w') as f:
for i in xrange(vocabulary_size):
f.write(reverse_dictionary[i] + '\n')
# Save the model for checkpoints.
saver.save(session, os.path.join(FLAGS.log_dir, 'model.ckpt'))
# Create a configuration for visualizing embeddings with the labels in TensorBoard.
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = embeddings.name
embedding_conf.metadata_path = os.path.join(FLAGS.log_dir, 'metadata.tsv')
projector.visualize_embeddings(writer, config)
writer.close()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(
label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(
perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
| apache-2.0 |
codorkh/infratopo | atmos_input_files/generate_atmos_files.py | 1 | 2603 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 24 15:42:12 2016
@author: dgreen
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Generating the input atmosphere files for Codor's PE code.
# Codor tells me he only needs the velocity profiles with altitude
# I will rip these out of the NCPA toy model for the test cases
#----------------------------------------------------------------
# Input data
dirstr = '/Users/dgreen/Documents/Work/4codor/atmos_input_files/'
filename = 'NCPA_canonical_profile_zuvwtdp.dat'
prop_azi = 90.0 # Propagation azimuth
isotherm_temp = 273.15+15. # Temperature of isothermal atmos in K
#----------------------------------------------------------------
df = pd.read_csv(filename,skiprows=None,names=['z','u','v','w','t','d','p'],sep=r"\s*")
azirad = prop_azi*np.pi/180.
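# Resolve the horizontal wind (u = zonal, v = meridional) onto the
# propagation azimuth; 402.8 is approximately gamma*R for dry air, so
# sqrt(402.8*T) is the adiabatic sound speed in m/s, and the effective
# sound speed simply adds the along-path wind component to it.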
alongpathwind = (df['u'].values*np.sin(azirad)) + (df['v'].values*np.cos(azirad))
abss = np.sqrt(402.8*df['t'].values) # Adiabatic Sound Speed
ess = alongpathwind+abss # Effective Sound Speed
isoss = np.sqrt(402.8*isotherm_temp)*np.ones(len(abss))
#-------------------------
# Print out to ASCII files
#-------------------------
atmos2deff_file = 'atmos_2d_eff_sound_sp.dat'
atmos2dadiab_file = 'atmos_2d_adiabatic_sound_sp.dat'
atmos2disotherm_file = 'atmos_2d_isothermal_sound_sp.dat'
fe2d = open(dirstr+atmos2deff_file, 'w')
for x in range(len(ess)):
# Write out z in metres (*1000.)
fe2d.write('{:6.0f} {:7.3f}\n'.format(df.iloc[x]['z']*1000.,ess[x]))
fe2d.close()
fa2d = open(dirstr+atmos2dadiab_file, 'w')
for x in range(len(abss)):
# Write out z in metres (*1000.)
fa2d.write('{:6.0f} {:7.3f}\n'.format(df.iloc[x]['z']*1000.,abss[x]))
fa2d.close()
fi2d = open(dirstr+atmos2disotherm_file, 'w')
for x in range(len(isoss)):
# Write out z in metres (*1000.)
fi2d.write('{:6.0f} {:7.3f}\n'.format(df.iloc[x]['z']*1000.,isoss[x]))
fi2d.close()
# Figure to show the three atmospheric profiles
para = {'axes.labelsize': 16, 'font.size': 16, 'legend.fontsize': 14, 'xtick.labelsize': 14, 'ytick.labelsize': 14, 'figure.subplot.left': 0.12, 'figure.subplot.right': 0.98, 'figure.subplot.bottom': 0.11, 'figure.subplot.top': 0.97}
plt.rcParams.update(para)
fig = plt.figure(figsize=(6,8))
ax1 = fig.add_axes([0.1,0.1,0.75,0.75])
altv = df['z']*1000.
ax1.plot(ess,altv,'k-',label='Eff. Sd. Sp.')
ax1.plot(abss,altv,'k--',label='Adiab. Sd. Sp.')
ax1.plot(isoss,altv,'k:',label='Iso. Sd. Sp.')
ax1.set_xlabel('Sound Speed (m/s)')
ax1.set_ylabel('Altitude (m)')
ax1.legend(loc=4)
plt.savefig('example_atmospheres.png',bbox_inches='tight')
| mit |
BhallaLab/moose | moose-examples/paper-2015/Fig4_ReacDiff/Fig4B.py | 2 | 7167 | ########################################################################
# This program is copyright (c) Upinder S. Bhalla, NCBS, 2015.
# It is licenced under the GPL 2.1 or higher.
# There is no warranty of any kind. You are welcome to make copies under
# the provisions of the GPL.
# This program builds a multiscale model with a few spines inserted into
# a simplified cellular morphology. Each spine has a signaling model in it
# too. The program doesn't run the model, it just displays it in 3D.
########################################################################
try:
import moogli
except Exception as e:
print( "[INFO ] Could not import moogli. Quitting ..." )
quit()
from PyQt4 import Qt, QtCore, QtGui
import numpy
import time
import pylab
import moose
from moose import neuroml
import matplotlib.pyplot as plt
import sys
import os
from moose.neuroml.ChannelML import ChannelML
sys.path.append('/home/bhalla/moose/trunk/Demos/util')
import rdesigneur as rd
PI = 3.14159265359
useGssa = True
combineSegments = False
baselineTime = 1
tetTime = 1
interTetTime = 0.1
postTetTime = 0.1
ltdTime = 0.1
postLtdTime = 0.1
do3D = True
dt = 0.01
plotdt = 0.1
psdTetCa = 8e-3
basalCa = 0.08e-3
ltdCa = 0.25e-3
def buildRdesigneur():
##################################################################
# Here we define which prototypes are to be loaded in to the system.
# Each specification has the format
# source [localName]
# source can be any of
# filename.extension, # Identify type of file by extension, load it.
# function(), # func( name ) builds object of specified name
# file.py:function() , # load Python file, run function(name) in it.
# moose.Classname # Make obj moose.Classname, assign to name.
# path # Already loaded into library or on path.
# After loading the prototypes, there should be an object called 'name'
# in the library.
##################################################################
cellProto = [ ['ca1_minimal.p', 'elec'] ]
spineProto = [ ['makeSpineProto()', 'spine' ]]
chemProto = [ ['CaMKII_merged77.g', 'chem'] ]
##################################################################
# Here we define what goes where, and any parameters. Each distribution
# has the format
# protoName, path, field, expr, [field, expr]...
# where
# protoName identifies the prototype to be placed on the cell
# path is a MOOSE wildcard path specifying where to put things
# field is the field to assign.
# expr is a math expression to define field value. This uses the
# muParser. Built-in variables are p, g, L, len, dia.
# The muParser provides most math functions, and the Heaviside
# function H(x) = 1 for x > 0 is also provided.
##################################################################
chemRange = "H(1.1e-6 - dia) * H(p - 1300e-6)"
spineDistrib = [ \
["spine", '#apical#', \
"spineSpacing", chemRange + " * 5e-6", \
"spineSpacingDistrib", "1e-6", \
"angle", "0", \
"angleDistrib", "6.28", \
"size", "6", \
"sizeDistrib", "0" ] \
]
chemDistrib = [ \
[ "chem", "#apical#", "install", chemRange ]
]
######################################################################
# Here we define the mappings across scales. Format:
# sourceObj sourceField destObj destField couplingExpr [wildcard][spatialExpn]
# where the coupling expression is anything a muParser can evaluate,
# using the input variable x. For example: 8e-5 + 300*x
# For now, let's use existing adaptors which take an offset and scale.
######################################################################
adaptorList = [
[ 'Ca_conc', 'Ca', 'psd/Ca_input', 'concInit', 8e-5, 1 ],
[ 'Ca_conc', 'Ca', 'dend/DEND/Ca_input', 'concInit', 8e-5, 1 ],
[ 'psd/tot_PSD_R', 'n', 'glu', 'Gbar', 0, 0.01 ],
]
######################################################################
# Having defined everything, now to create the rdesigneur and proceed
# with creating the model.
######################################################################
rdes = rd.rdesigneur(
useGssa = useGssa, \
combineSegments = combineSegments, \
stealCellFromLibrary = True, \
spineDistrib = spineDistrib, \
chemDistrib = chemDistrib, \
cellProto = cellProto, \
spineProto = spineProto, \
chemProto = chemProto
)
return rdes
def createVmViewer(rdes):
network = moogli.extensions.moose.read(rdes.elecid.path)
normalizer = moogli.utilities.normalizer(-0.08,
0.02,
clipleft=True,
clipright=True)
colormap = moogli.colors.UniformColorMap([moogli.colors.Color(0.0,
0.5,
1.0,
1.0),
moogli.colors.Color(1.0,
0.0,
0.0,
0.9)])
mapper = moogli.utilities.mapper(colormap, normalizer)
def prelude(view):
vms = [moose.element(x).Vm for x in list(network.shapes.keys())]
network.set("color", vms, mapper)
view.pitch(PI/2.0)
view.down(450)
view.left(100)
view.h = 2.0
view.zoom(5.0)
def interlude(view):
if view.h > 0.10:
view.h /= 1.005
view.zoom(0.005)
view.yaw(0.01)
viewer = moogli.Viewer("vm-viewer")
viewer.attach_shapes(list(network.shapes.values()))
view = moogli.View("vm-view",
prelude=prelude,
interlude=interlude)
viewer.attach_view(view)
return viewer
def main():
numpy.random.seed( 1234 )
rdes = buildRdesigneur()
rdes.buildModel( '/model' )
assert( moose.exists( '/model' ) )
moose.element( '/model/elec/hsolve' ).tick = -1
for i in range( 10, 18 ):
moose.setClock( i, dt )
moose.setClock( 18, plotdt )
moose.reinit()
if do3D:
app = QtGui.QApplication(sys.argv)
compts = moose.wildcardFind( "/model/elec/#[ISA=CompartmentBase]" )
print(("LEN = ", len( compts )))
for i in compts:
n = i.name[:4]
if ( n == 'head' or n == 'shaf' ):
i.diameter *= 1.0
i.Vm = 0.02
else:
i.diameter *= 4.0
i.Vm = -0.05
vm_viewer = createVmViewer(rdes)
vm_viewer.showMaximized()
vm_viewer.start()
app.exec_()
if __name__ == '__main__':
main()
| gpl-3.0 |
GaZ3ll3/numpy | numpy/lib/function_base.py | 5 | 132305 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
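    # Process the (sorted) data in blocks of 65536 samples: for each block,
    # searchsorted against the bin edges yields cumulative counts at every
    # edge, and the np.diff(n) below converts those cumulative counts into
    # per-bin counts.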
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal to the dimension of the '
'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
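    # xy now holds, for every sample, a single flat (mixed-radix) index that
    # is equivalent to its D-dimensional bin coordinates, so a single
    # bincount call below counts all samples at once; the counts are then
    # reshaped and the axes swapped back into the original dimension order.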
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : array_type or double
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.asarray(weights)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create and array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
# 2014-02-24, 1.9
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
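# Sketch of the precedence rule implemented above (values assumed):
# because the copyto loop walks the reversed lists, the first condition
# in condlist is burned onto `result` last and therefore wins wherever
# several conditions overlap.
# >>> x = np.arange(5)
# >>> np.select([x < 3, x > 1], [x, 10 * x])
# array([ 0,  1,  2, 30, 40])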
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
        as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
    one-sided (forward or backwards) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
Returns
-------
gradient : list of ndarray
Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for axis in range(N):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
        # Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[axis]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
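# A small worked check of the boundary stencils above, using the last
# docstring example (y = x**2 on a unit grid): the interior central
# difference (y[k+1] - y[k-1])/2 gives 2*k exactly, while the second
# order one-sided stencil -(3*y[0] - 4*y[1] + y[2])/2 = -(0 - 4 + 4)/2
# evaluates to -0.0 at the left edge, which is why the documented output
# starts with -0.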
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
    the given axis; higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
        The `n`-th order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
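# Note on the recursion above: each pass subtracts shifted slices and so
# strips one element along `axis`; n passes therefore shrink that
# dimension by n, matching the output shape described in the docstring.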
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
if period is None:
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
return_array = True
if isinstance(x, (float, int, number)):
return_array = False
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=np.float64)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
if return_array:
return compiled_interp(x, xp, fp, left, right)
else:
return compiled_interp(x, xp, fp, left, right).item()
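# Note on the periodic branch above: after normalizing and sorting xp,
# one extra sample is prepended (xp[-1] - period) and one appended
# (xp[0] + period), so queries falling between the last and first data
# points wrap around the period instead of hitting the left/right fill
# values, which are ignored in this mode.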
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
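# Sketch of the correction above: `dd` holds the raw phase increments,
# `ddmod` folds each increment into the interval (-pi, pi], and
# `ph_correct` records the multiples of 2*pi that were removed;
# increments whose magnitude is already below `discont` are zeroed out,
# and the cumulative sum shifts every later sample by the running
# correction.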
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
The new keyword argument interface and `excluded` argument support
further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
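    # Sketch of the wrapper built above: `inds` holds the positions of
    # the non-excluded positional arguments and `names` the non-excluded
    # keywords. `func` receives only those values, writes them back into
    # `the_args`/`kwargs`, and then calls the original pyfunc, so the
    # excluded arguments are passed through unchanged on every call
    # instead of being broadcast element-wise.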
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` corresponds to the
number of observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
    >>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
    >>> print(np.cov(x, y))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
    >>> print(np.cov(x))
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if rowvar == 0 and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if rowvar == 0 and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=np.float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=np.float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = float(X.shape[1] - ddof)
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
return (dot(X, X_T.conj())/fact).squeeze()
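# A quick unweighted sanity check of the normalization above, reusing
# the docstring data: with x = [0, 1, 2] and y = [2, 1, 0] there are no
# weights, so fact = N - ddof = 3 - 1 = 2 and dot(X, X.T)/2 gives
# array([[ 1., -1.],
#        [-1.,  1.]]) as shown in the Examples section.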
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
        Has no effect, do not use.
.. deprecated:: 1.10.0
ddof : _NoValue, optional
        Has no effect, do not use.
.. deprecated:: 1.10.0
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
        warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
return c / sqrt(multiply.outer(d, d))
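# Note on the return above: with d = diag(c), dividing c elementwise by
# sqrt(multiply.outer(d, d)) rescales every entry by the product of the
# two standard deviations involved, so the diagonal of R becomes 1 (up
# to rounding) and the off-diagonal entries are the Pearson coefficients.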
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the
    product of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
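# Quick check of the "maximum value normalized to one" claim in the
# window docstrings above: for odd M the midpoint n = (M-1)/2 gives
# cos(pi) = -1, so hanning evaluates to 0.5 + 0.5 = 1.0 and hamming to
# 0.54 + 0.46 = 1.0 exactly; for even M the peak stays slightly below
# one.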
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
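# _chbevl evaluates a Chebyshev series with a Clenshaw-style recurrence,
# b0 = x*b1 - b2 + c_i, returning (b0 - b2)/2, mirroring the Cephes
# chbevl helper. The coefficient tables _i0A/_i0B above appear to be the
# Cephes expansions of exp(-x)*I0(x) on [0, 8] and exp(-x)*sqrt(x)*I0(x)
# on the inverted interval (8, inf), which is why _i0_1 and _i0_2 below
# multiply back by exp(x) and _i0_2 additionally divides by sqrt(x).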
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
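# Note on the masking above: I0 is an even function, so negative inputs
# are folded to their absolute values first, and the two Chebyshev
# expansions are then applied element-wise on [0, 8] and (8, inf).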
## End of cephes code for i0
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
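# Implementation note: substituting the tiny value 1e-20 for exact zeros
# before dividing sidesteps a 0/0 warning while sin(y)/y still evaluates
# to 1.0 there (to within rounding), matching the documented limit at
# x = 0.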
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
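# Sketch of how callers use the second return value: `keepdim` is the
# original shape with every reduced axis replaced by 1, so e.g.
# median(..., keepdims=True) below simply reshapes the reduced result
# back to that shape, mimicking a ufunc reduction with keepdims=True.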
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
    # can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
part = np.rollaxis(part, axis, part.ndim)
n = np.isnan(part[..., -1])
if rout.ndim == 0:
if n == True:
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if out is not None:
out[...] = a.dtype.type(np.nan)
rout = out
else:
rout = a.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
warnings.warn("Invalid value encountered in median for" +
" %d results" % np.count_nonzero(n.ravel()),
RuntimeWarning)
rout[n] = np.nan
return rout
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If a single percentile `q` is given and axis=None a scalar is
returned. If multiple percentiles `q` are given an array holding
the result is returned. The results are listed in the first axis.
(If `out` is specified, in which case that array is returned
instead). If the input contains integers, or floats of smaller
precision than 64, then the output data-type is float64. Otherwise,
the output data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
array([ 3.5])
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([[ 7.],
[ 2.]])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
array([ 3.5])
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
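# Illustrative sketch (not part of the original module): the effect of the
# `interpolation` keyword when the desired percentile falls between two data
# points. The helper name `_percentile_interpolation_demo` is hypothetical.
def _percentile_interpolation_demo():
    a = np.array([1, 2, 3, 4])
    # the 40th percentile has fractional index 1.2, i.e. it lies between 2 and 3
    return dict((method, percentile(a, 40, interpolation=method))
                for method in ('linear', 'lower', 'higher', 'midpoint', 'nearest'))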
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
    # prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
        raise ValueError(
            "interpolation can only be 'linear', 'lower', 'higher', "
            "'midpoint', or 'nearest'")
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = np.isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
weights_below = np.rollaxis(weights_below, axis, 0)
weights_above = np.rollaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if np.any(n):
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if zerod:
if ap.ndim == 1:
if out is not None:
out[...] = a.dtype.type(np.nan)
r = out
else:
r = a.dtype.type(np.nan)
else:
r[..., n.squeeze(0)] = a.dtype.type(np.nan)
else:
if r.ndim == 1:
r[:] = a.dtype.type(np.nan)
else:
r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
return r
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates trapezoidal rule -- y-axis locations of points
will be taken from `y` array, by default x-axis distances between
points will be 1.0, alternatively they can be provided with `x` array
or with `dx` scalar. Return value will be equal to combined area under
the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
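# Illustrative sketch (not part of the original module): `trapz` with
# non-uniform sample points is exact for piecewise-linear data. The helper
# name `_trapz_nonuniform_demo` is hypothetical.
def _trapz_nonuniform_demo():
    x = np.array([0.0, 1.0, 3.0, 6.0])   # unevenly spaced abscissae
    y = 2.0 * x                          # f(x) = 2x, integral over [0, 6] is 36
    return trapz(y, x)                   # exactly 36.0 for linear data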
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
        If False, a view into the original arrays is returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
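# Illustrative sketch (not part of the original module): output shapes for
# the two indexing conventions described in the docstring above. The helper
# name `_meshgrid_indexing_demo` is hypothetical.
def _meshgrid_indexing_demo():
    x = np.arange(3)                      # length M = 3
    y = np.arange(2)                      # length N = 2
    xv_xy, yv_xy = meshgrid(x, y, indexing='xy')
    xv_ij, yv_ij = meshgrid(x, y, indexing='ij')
    # Cartesian indexing yields (N, M); matrix indexing yields (M, N)
    return xv_xy.shape, xv_ij.shape       # ((2, 3), (3, 2))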
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
# 2013-09-24, 1.9
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
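# Illustrative sketch (not part of the original module): the boolean-mask
# pattern from the Notes section of `delete`, which keeps `mask` around for
# further use. The helper name `_delete_mask_demo` is hypothetical.
def _delete_mask_demo():
    arr = np.arange(6)
    mask = np.ones(len(arr), dtype=bool)
    mask[[0, 2, 4]] = False
    # equivalent to delete(arr, [0, 2, 4], axis=0)
    return np.array_equal(arr[mask], delete(arr, [0, 2, 4], axis=0))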
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
    Note that for higher dimensional inserts `obj=0` behaves very differently
    from `obj=[0]`, just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very different from a[:,[0],:] = ...! This changes values so that
            # it works like the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
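# Illustrative sketch (not part of the original module): the scalar versus
# one-element-sequence behaviour of `obj` described in the Notes above. The
# helper name `_insert_scalar_vs_sequence_demo` is hypothetical.
def _insert_scalar_vs_sequence_demo():
    a = np.array([[1, 1], [2, 2], [3, 3]])
    # a scalar index places `values` as a single new column ...
    with_scalar = insert(a, 1, [10, 20, 30], axis=1)
    # ... and a one-element sequence matches it when `values` is column-shaped
    with_sequence = insert(a, [1], [[10], [20], [30]], axis=1)
    return np.array_equal(with_scalar, with_sequence)   # True, as in the docstring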
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| bsd-3-clause |
arabenjamin/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
spmaniato/LTLMoP | doc/conf.py | 7 | 7828 | # -*- coding: utf-8 -*-
#
# LTLMoP documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 30 19:27:28 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../src'))
sys.path.append(os.path.abspath('../src/lib'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'LTLMoP'
copyright = u'2006-2014, Cameron Finucane, Gangyuan (Jim) Jing, Hadas Kress-Gazit, et al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'LTLMoPdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'LTLMoP.tex', u'LTLMoP Documentation',
u'Cameron Finucane, Gangyuan (Jim) Jing, Hadas Kress-Gazit', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
##########################
#### below is dependency workaround code from http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
import sys
class Mock(object):
__all__ = []
def __init__(self, *args, **kwargs):
self.eps = 0 # hack for numpy
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
MOCK_MODULES = ['wx', 'wx.richtext', 'wx.stc', 'wx.grid', 'wx.lib',
'wx.lib.buttons', 'wx.lib.intctrl', 'wxversion', 'Polygon', 'Polygon.Shapes',
'Polygon.IO', 'Polygon.Utils', 'playerc', 'numpy', 'numpy.ma', 'numpy.matlib',
'scipy', 'scipy.linalg', 'scipy.optimize', 'ompl', 'roslib', 'rospy', 'gazebo',
'gazebo.srv', 'matplotlib', 'matplotlib.pyplot', 'matplotlib.cbook', 'std_msgs',
'std_msgs.msg', 'tf', 'tf.transformations', 'matplotlib.backends',
'matplotlib.backends.backend_tkagg', 'matplotlib.figure', 'matplotlib.axes',
'matplotlib.transforms', 'pycudd']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
| gpl-3.0 |
alejob/mdanalysis | testsuite/MDAnalysisTests/analysis/test_persistencelength.py | 1 | 3953 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import print_function
import MDAnalysis
from MDAnalysis.analysis import polymer
from MDAnalysis.exceptions import NoDataError
import numpy as np
from numpy.testing import (
assert_,
assert_almost_equal,
assert_raises,
dec
)
from MDAnalysisTests.datafiles import Plength
from MDAnalysisTests import module_not_found
class TestPersistenceLength(object):
def setUp(self):
self.u = MDAnalysis.Universe(Plength)
def tearDown(self):
del self.u
def test_ag_VE(self):
ags = [self.u.atoms[:10], self.u.atoms[10:110]]
assert_raises(ValueError, polymer.PersistenceLength, ags)
def _make_p(self):
ags = [r.atoms.select_atoms('name C* N*')
for r in self.u.residues]
p = polymer.PersistenceLength(ags)
return p
def test_run(self):
p = self._make_p()
p.run()
assert_(len(p.results) == 280)
assert_almost_equal(p.lb, 1.485, 3)
@dec.skipif(module_not_found('scipy'),
"Test skipped because scipy is not available.")
def test_fit(self):
p = self._make_p()
p.run()
p.perform_fit()
assert_almost_equal(p.lp, 6.504, 3)
assert_(len(p.fit) == len(p.results))
@dec.skipif(module_not_found('matplotlib'),
"Test skipped because matplotlib is not available.")
@dec.skipif(module_not_found('scipy'),
"Test skipped because scipy is not available.")
def test_plot_ax_return(self):
'''Ensure that a matplotlib axis object is
returned when plot() is called.'''
import matplotlib
p = self._make_p()
p.run()
p.perform_fit()
actual = p.plot()
expected = matplotlib.axes.Axes
assert_(isinstance(actual, expected))
def test_raise_NoDataError(self):
'''Ensure that a NoDataError is raised if
perform_fit() is called before the run()
method of AnalysisBase.'''
p = self._make_p()
assert_raises(NoDataError, p.perform_fit)
class TestFitExponential(object):
def setUp(self):
self.x = np.linspace(0, 250, 251)
self.a_ref = 20.0
self.y = np.exp(-self.x/self.a_ref)
def tearDown(self):
del self.x
del self.a_ref
del self.y
@dec.skipif(module_not_found('scipy'),
"Test skipped because scipy is not available.")
def test_fit_simple(self):
a = polymer.fit_exponential_decay(self.x, self.y)
assert_(a == self.a_ref)
@dec.skipif(module_not_found('scipy'),
"Test skipped because scipy is not available.")
def test_fit_noisy(self):
noise = np.sin(self.x) * 0.01
y2 = noise + self.y
a = polymer.fit_exponential_decay(self.x, y2)
assert_almost_equal(a, self.a_ref, decimal=3)
#assert_(np.rint(a) == self.a_ref)
| gpl-2.0 |
gst-group/apriori_demo | apriori.py | 1 | 6853 | """
Description : Simple Python implementation of the Apriori Algorithm
Usage:
$python apriori.py -f DATASET.csv -s minSupport -c minConfidence
$python apriori.py -f DATASET.csv -s 0.15 -c 0.6
"""
import sys
from itertools import chain, combinations
from collections import defaultdict
from optparse import OptionParser
import pandas as pd
import numpy as np
def subsets(arr):
""" Returns non empty subsets of arr"""
return chain(*[combinations(arr, i + 1) for i, a in enumerate(arr)])
def returnItemsWithMinSupport(itemSet, transactionList, minSupport, freqSet):
"""calculates the support for items in the itemSet and returns a subset
of the itemSet each of whose elements satisfies the minimum support"""
_itemSet = set()
localSet = defaultdict(int)
for item in itemSet:
for transaction in transactionList:
if item.issubset(transaction):
freqSet[item] += 1
localSet[item] += 1
for item, count in localSet.items():
support = float(count)/len(transactionList)
if support >= minSupport:
_itemSet.add(item)
return _itemSet
def joinSet(itemSet, length):
"""Join a set with itself and returns the n-element itemsets"""
return set([i.union(j) for i in itemSet for j in itemSet if len(i.union(j)) == length])
def getItemSetTransactionList(data_iterator):
transactionList = list()
itemSet = set()
for record in data_iterator:
transaction = frozenset(record)
transactionList.append(transaction)
for item in transaction:
itemSet.add(frozenset([item])) # Generate 1-itemSets
return itemSet, transactionList
def runApriori(data_iter, minSupport, minConfidence,total = 1):
"""
run the apriori algorithm. data_iter is a record iterator
Return both:
- items (tuple, support)
- rules ((pretuple, posttuple), confidence)
"""
itemSet, transactionList = getItemSetTransactionList(data_iter)
freqSet = defaultdict(int)
largeSet = dict()
# Global dictionary which stores (key=n-itemSets,value=support)
# which satisfy minSupport
assocRules = dict()
# Dictionary which stores Association Rules
oneCSet = returnItemsWithMinSupport(itemSet,
transactionList,
minSupport,
freqSet)
currentLSet = oneCSet
k = 2
while(currentLSet != set([])):
largeSet[k-1] = currentLSet
currentLSet = joinSet(currentLSet, k)
currentCSet = returnItemsWithMinSupport(currentLSet,
transactionList,
minSupport,
freqSet)
currentLSet = currentCSet
k = k + 1
def getSupport(item):
"""local function which Returns the support of an item"""
return float(freqSet[item])/len(transactionList)
toRetItems = []
for key, value in largeSet.items():
toRetItems.extend([(tuple(item),len(tuple(item)), int(getSupport(item)*total))
for item in value])
toRetRules = []
for key, value in largeSet.items()[1:]:
for item in value:
_subsets = map(frozenset, [x for x in subsets(item)])
for element in _subsets:
remain = item.difference(element)
if len(remain) > 0:
confidence = getSupport(item)/getSupport(element)
if confidence >= minConfidence:
toRetRules.append(((tuple(element), tuple(remain)),
confidence))
return toRetItems, toRetRules
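# Illustrative sketch (not part of the original script): running the
# algorithm on a small in-memory transaction list instead of a CSV file.
# The function name `demo_run_apriori` and the sample items are made up.
def demo_run_apriori():
    transactions = [frozenset(['beer', 'chips']),
                    frozenset(['beer', 'diapers']),
                    frozenset(['beer', 'chips', 'diapers']),
                    frozenset(['chips'])]
    # keep itemsets present in at least half the baskets, rules at 60% confidence
    items, rules = runApriori(transactions, 0.5, 0.6, total=len(transactions))
    return items, rules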
def decode_tuple_zh(item):
    """Join the elements of a tuple into a single comma-separated string."""
    result = ''
    for i in item:
        if result != '':
            result = result + ',' + i
        else:
            result = result + i
    return result
def ExportResults(items, rules):
    """Export the generated itemsets to a CSV file, sorted by itemset size and
    frequency (the older console-printing code is kept commented out below)."""
# for item, support in sorted(items, key=lambda (item, support): support):
#
# #print type(item)
# pass
# # print "%s" % (decode_tuple_zh(item))
# #print support
# # df = pd.DataFrame(decode_tuple_zh(item))
# # df.to_csv('text.csv',index=False)
# # print "\n------------------------ RULES:"
# # for rule, confidence in sorted(rules, key=lambda (rule, confidence): confidence):
# # pre, post = rule
# # print "Rule: %s ==> %s , %.3f" % (decode_tuple_zh(pre), decode_tuple_zh(post), confidence)
# #print type(items)
items = np.asarray(items)
df = pd.DataFrame(items)
df.columns = ['FrequentWords','Size','Frequency']
#df.sort(columns='Size',ascending=True)
    df = df.sort_values(by=['Size', 'Frequency'], ascending=True)
df.to_csv("data.csv",index=False,encoding="utf_8_sig")
def dataFromFile(fname):
"""Function which reads from the file and yields a generator"""
file_iter = open(fname, 'rU')
for line in file_iter:
# print(line) #python 3
print line #python 2
line = line.strip().rstrip(',') # Remove trailing comma
record = frozenset(line.split(','))
yield record
if __name__ == "__main__":
optparser = OptionParser()
optparser.add_option('-f', '--inputFile',
dest='input',
help='filename containing csv',
default=None)
optparser.add_option('-s', '--minSupport',
dest='minS',
help='minimum support value',
default=0.15,
type='float')
optparser.add_option('-c', '--minConfidence',
dest='minC',
help='minimum confidence value',
default=0.6,
type='float')
(options, args) = optparser.parse_args()
inFile = None
if options.input is None:
inFile = sys.stdin
elif options.input is not None:
inFile = dataFromFile(options.input)
else:
        # print('No dataset filename specified, system will exit\n') #python 3
        print 'No dataset filename specified, system will exit\n' #python 2
sys.exit('System will exit')
minSupport = options.minS
minConfidence = options.minC
items, rules = runApriori(inFile, minSupport, minConfidence,300)
ExportResults(items, rules)
| mit |
arabenjamin/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
AnasGhrab/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 142 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n / 2]
Y_train = Y[:n / 2]
X_test = X[n / 2:]
Y_test = Y[n / 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
lakshayg/tensorflow | tensorflow/python/estimator/inputs/pandas_io.py | 86 | 4503 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=None,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object. `None` if absent.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: Integer, number of threads used for reading and enqueueing. In
order to have predicted and repeatable order of reading and enqueueing,
such as in prediction and evaluation mode, `num_threads` should be 1.
target_column: str, name to give the target column `y`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
TypeError: `shuffle` is not bool.
"""
if not HAS_PANDAS:
raise TypeError(
'pandas_input_fn should not be called without pandas installed')
if not isinstance(shuffle, bool):
raise TypeError('shuffle must be explicitly set as boolean; '
'got {}'.format(shuffle))
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions._enqueue_data( # pylint: disable=protected-access
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
| apache-2.0 |
yavalvas/yav_com | build/matplotlib/examples/pylab_examples/broken_axis.py | 12 | 2508 | """
Broken axis example, where the y-axis will have a portion cut out.
"""
import matplotlib.pylab as plt
import numpy as np
# 30 points between [0, 0.2], originally made using np.random.rand(30)*.2
pts = np.array([ 0.015, 0.166, 0.133, 0.159, 0.041, 0.024, 0.195,
0.039, 0.161, 0.018, 0.143, 0.056, 0.125, 0.096, 0.094, 0.051,
0.043, 0.021, 0.138, 0.075, 0.109, 0.195, 0.05 , 0.074, 0.079,
0.155, 0.02 , 0.01 , 0.061, 0.008])
# Now let's make two outlier points which are far away from everything.
pts[[3,14]] += .8
# If we were to simply plot pts, we'd lose most of the interesting
# details due to the outliers. So let's 'break' or 'cut-out' the y-axis
# into two portions - use the top (ax) for the outliers, and the bottom
# (ax2) for the details of the majority of our data
f,(ax,ax2) = plt.subplots(2,1,sharex=True)
# plot the same data on both axes
ax.plot(pts)
ax2.plot(pts)
# zoom-in / limit the view to different portions of the data
ax.set_ylim(.78,1.) # outliers only
ax2.set_ylim(0,.22) # most of the data
# hide the spines between ax and ax2
ax.spines['bottom'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax.xaxis.tick_top()
ax.tick_params(labeltop='off') # don't put tick labels at the top
ax2.xaxis.tick_bottom()
# This looks pretty good, and was fairly painless, but you can get that
# cut-out diagonal lines look with just a bit more work. The important
# thing to know here is that in axes coordinates, which are always
# between 0-1, spine endpoints are at these locations (0,0), (0,1),
# (1,0), and (1,1). Thus, we just need to put the diagonals in the
# appropriate corners of each of our axes; as long as we use the right
# transform and disable clipping, they will sit exactly on the spine tips.
d = .015 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((-d,+d),(-d,+d), **kwargs) # top-left diagonal
ax.plot((1-d,1+d),(-d,+d), **kwargs) # top-right diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d,+d),(1-d,1+d), **kwargs) # bottom-left diagonal
ax2.plot((1-d,1+d),(1-d,1+d), **kwargs) # bottom-right diagonal
# What's cool about this is that now if we vary the distance between
# ax and ax2 via f.subplots_adjust(hspace=...) or plt.subplot_tool(),
# the diagonal lines will move accordingly, and stay right at the tips
# of the spines they are 'breaking'
plt.show()
| mit |
pleonex/Eva | object recognition/examples/common.py | 2 | 8891 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Eva - Object recogniton
# Copyright (C) 2014 Rafael Bailón-Ruiz <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# By downloading, copying, installing or using the software you agree to this license.
# If you do not agree to this license, do not download, install,
# copy or use the software.
#
#
# License Agreement
# For Open Source Computer Vision Library
# (3-clause BSD License)
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the names of the copyright holders nor the names of the contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# This software is provided by the copyright holders and contributors "as is" and
# any express or implied warranties, including, but not limited to, the implied
# warranties of merchantability and fitness for a particular purpose are disclaimed.
# In no event shall copyright holders or contributors be liable for any direct,
# indirect, incidental, special, exemplary, or consequential damages
# (including, but not limited to, procurement of substitute goods or services;
# loss of use, data, or profits; or business interruption) however caused
# and on any theory of liability, whether in contract, strict liability,
# or tort (including negligence or otherwise) arising in any way out of
# the use of this software, even if advised of the possibility of such damage.
'''
This module contains some common routines used by other samples.
'''
import numpy as np
import cv2
import os
from contextlib import contextmanager
import itertools as it
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
class Bunch(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def __str__(self):
return str(self.__dict__)
def splitfn(fn):
path, fn = os.path.split(fn)
name, ext = os.path.splitext(fn)
return path, name, ext
def anorm2(a):
return (a*a).sum(-1)
def anorm(a):
return np.sqrt( anorm2(a) )
def homotrans(H, x, y):
xs = H[0, 0]*x + H[0, 1]*y + H[0, 2]
ys = H[1, 0]*x + H[1, 1]*y + H[1, 2]
s = H[2, 0]*x + H[2, 1]*y + H[2, 2]
return xs/s, ys/s
def to_rect(a):
a = np.ravel(a)
if len(a) == 2:
a = (0, 0, a[0], a[1])
return np.array(a, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
src, dst = to_rect(src), to_rect(dst)
cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])
tx, ty = dst[0] - src[0] * (cx, cy)
M = np.float64([[ cx, 0, tx],
[ 0, cy, ty],
[ 0, 0, 1]])
return M
def lookat(eye, target, up = (0, 0, 1)):
fwd = np.asarray(target, np.float64) - eye
fwd /= anorm(fwd)
right = np.cross(fwd, up)
right /= anorm(right)
down = np.cross(fwd, right)
R = np.float64([right, down, fwd])
tvec = -np.dot(R, eye)
return R, tvec
def mtx2rvec(R):
w, u, vt = cv2.SVDecomp(R - np.eye(3))
p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
c = np.dot(vt[0], p)
s = np.dot(vt[1], p)
axis = np.cross(vt[0], vt[1])
return axis * np.arctan2(s, c)
def draw_str(dst, (x, y), s):
cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
class Sketcher:
def __init__(self, windowname, dests, colors_func):
self.prev_pt = None
self.windowname = windowname
self.dests = dests
self.colors_func = colors_func
self.dirty = False
self.show()
cv2.setMouseCallback(self.windowname, self.on_mouse)
def show(self):
cv2.imshow(self.windowname, self.dests[0])
def on_mouse(self, event, x, y, flags, param):
pt = (x, y)
if event == cv2.EVENT_LBUTTONDOWN:
self.prev_pt = pt
if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
for dst, color in zip(self.dests, self.colors_func()):
cv2.line(dst, self.prev_pt, pt, color, 5)
self.dirty = True
self.prev_pt = pt
self.show()
else:
self.prev_pt = None
# palette data from matplotlib/_cm.py
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
cmap_data = { 'jet' : _jet_data }
def make_cmap(name, n=256):
data = cmap_data[name]
xs = np.linspace(0.0, 1.0, n)
channels = []
eps = 1e-6
for ch_name in ['blue', 'green', 'red']:
ch_data = data[ch_name]
xp, yp = [], []
for x, y1, y2 in ch_data:
xp += [x, x+eps]
yp += [y1, y2]
ch = np.interp(xs, xp, yp)
channels.append(ch)
return np.uint8(np.array(channels).T*255)
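# Hedged usage sketch for make_cmap (added comment, not in the original OpenCV
# sample): the returned 256x3 uint8 lookup table is in B,G,R column order and
# can colorize a single-channel uint8 image by fancy indexing, e.g.
#   jet = make_cmap('jet')   # shape (256, 3)
#   vis = jet[gray]          # gray: uint8 HxW image -> vis: HxWx3 BGR image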
def nothing(*arg, **kw):
pass
def clock():
return cv2.getTickCount() / cv2.getTickFrequency()
@contextmanager
def Timer(msg):
print msg, '...',
start = clock()
try:
yield
finally:
print "%.2f ms" % ((clock()-start)*1000)
class StatValue:
def __init__(self, smooth_coef = 0.5):
self.value = None
self.smooth_coef = smooth_coef
def update(self, v):
if self.value is None:
self.value = v
else:
c = self.smooth_coef
self.value = c * self.value + (1.0-c) * v
class RectSelector:
def __init__(self, win, callback):
self.win = win
self.callback = callback
cv2.setMouseCallback(win, self.onmouse)
self.drag_start = None
self.drag_rect = None
def onmouse(self, event, x, y, flags, param):
x, y = np.int16([x, y]) # BUG
if event == cv2.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
if self.drag_start:
if flags & cv2.EVENT_FLAG_LBUTTON:
xo, yo = self.drag_start
x0, y0 = np.minimum([xo, yo], [x, y])
x1, y1 = np.maximum([xo, yo], [x, y])
self.drag_rect = None
if x1-x0 > 0 and y1-y0 > 0:
self.drag_rect = (x0, y0, x1, y1)
else:
rect = self.drag_rect
self.drag_start = None
self.drag_rect = None
if rect:
self.callback(rect)
def draw(self, vis):
if not self.drag_rect:
return False
x0, y0, x1, y1 = self.drag_rect
cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
return True
@property
def dragging(self):
return self.drag_rect is not None
def grouper(n, iterable, fillvalue=None):
'''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
args = [iter(iterable)] * n
return it.izip_longest(fillvalue=fillvalue, *args)
def mosaic(w, imgs):
'''Make a grid from images.
w -- number of grid columns
imgs -- images (must have same size and format)
'''
imgs = iter(imgs)
img0 = imgs.next()
pad = np.zeros_like(img0)
imgs = it.chain([img0], imgs)
rows = grouper(w, imgs, pad)
return np.vstack(map(np.hstack, rows))
def getsize(img):
h, w = img.shape[:2]
return w, h
def mdot(*args):
return reduce(np.dot, args)
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
for kp in keypoints:
x, y = kp.pt
cv2.circle(vis, (int(x), int(y)), 2, color)
| gpl-3.0 |
Ledoux/ShareYourSystem | Pythonlogy/draft/Simulaters/Brianer/draft/01_ExampleCell copy 2.py | 2 | 1268 |
#ImportModules
import ShareYourSystem as SYS
from ShareYourSystem.Specials.Simulaters import Populater,Rater,Brianer
#Definition of a brian structure
MyBrianer=Brianer.BrianerClass(
).push(
[
(
'First',
Populater.PopulaterClass().update(
[
('PopulatingUnitsInt',3),
(
'PopulatingEquationStr',
'''
dv/dt = (10-(v+50))/(20*ms) : volt
'''
),
('MoniteringTrackTuplesList',
[
('State','v',[0,1],1.)
]
),
('ConnectingCatchGetStrsList',
[
'/NodePointDeriveNoder/<Connectome>SecondRater'
]
),
('ConnectingGraspClueVariablesList',
[
'/NodePointDeriveNoder/<Connectome>SecondRater'
]
)
]
)
),
(
'Second',
Rater.RaterClass().update(
[
('PopulatingUnitsInt',1)
]
)
)
],
**{
'CollectingCollectionStr':'Connectome'
}
).run(2.)
#Definition the AttestedStr
SYS._attest(
[
'MyBrianer is '+SYS._str(
MyBrianer,
**{
'RepresentingBaseKeyStrsList':False,
'RepresentingAlineaIsBool':False
}
),
]
)
#SYS._print(MyBrianer.BrianedMonitorsList[0].__dict__)
SYS._print(
MyBrianer.BrianedNeuronGroupsList[0].__dict__
)
#import matplotlib
#plot(MyBrianer['<Connectome>FirstRater'].)
#Print
| mit |
fspaolo/scikit-learn | sklearn/utils/tests/test_random.py | 20 | 3872 | from __future__ import division
import numpy as np
from scipy.misc import comb as combinations
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/api/compound_path.py | 1 | 1660 | """
=============
Compound path
=============
Make a compound path -- in this case two simple polygons, a rectangle
and a triangle. Use CLOSEPOLY and MOVETO for the different parts of
the compound path
"""
import numpy as np
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
vertices = []
codes = []
codes = [Path.MOVETO] + [Path.LINETO]*3 + [Path.CLOSEPOLY]
vertices = [(1, 1), (1, 2), (2, 2), (2, 1), (0, 0)]
codes += [Path.MOVETO] + [Path.LINETO]*2 + [Path.CLOSEPOLY]
vertices += [(4, 4), (5, 5), (5, 4), (0, 0)]
vertices = np.array(vertices, float)
path = Path(vertices, codes)
pathpatch = PathPatch(path, facecolor='None', edgecolor='green')
fig, ax = plt.subplots()
ax.add_patch(pathpatch)
ax.set_title('A compound path')
ax.dataLim.update_from_data_xy(vertices)
ax.autoscale_view()
pltshow(plt)
| mit |
poryfly/scikit-learn | examples/classification/plot_lda_qda.py | 164 | 4806 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
| bsd-3-clause |
jjbrin/trading-with-python | cookbook/reconstructVXX/downloadVixFutures.py | 77 | 3012 | #-------------------------------------------------------------------------------
# Name: download CBOE futures
# Purpose: get VIX futures data from CBOE, process data to a single file
#
#
# Created: 15-10-2011
# Copyright: (c) Jev Kuznetsov 2011
# Licence: BSD
#-------------------------------------------------------------------------------
#!/usr/bin/env python
from urllib import urlretrieve
import os
from pandas import *
import datetime
import numpy as np
m_codes = ['F','G','H','J','K','M','N','Q','U','V','X','Z'] #month codes of the futures
codes = dict(zip(m_codes,range(1,len(m_codes)+1)))
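# For clarity (added comment): the mapping follows the standard futures month
# codes, e.g. codes['F'] == 1 (January), codes['K'] == 5 (May),
# codes['Z'] == 12 (December).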
#dataDir = os.path.dirname(__file__)+'/data'
dataDir = os.path.expanduser('~')+'/twpData/vixFutures'
print 'Data directory: ', dataDir
def saveVixFutureData(year,month, path, forceDownload=False):
''' Get future from CBOE and save to file '''
fName = "CFE_{0}{1}_VX.csv".format(m_codes[month],str(year)[-2:])
    if os.path.exists(path+'\\'+fName) and not forceDownload:
        print 'File already downloaded, skipping'
        return
urlStr = "http://cfe.cboe.com/Publish/ScheduledTask/MktData/datahouse/{0}".format(fName)
print 'Getting: %s' % urlStr
try:
urlretrieve(urlStr,path+'\\'+fName)
except Exception as e:
print e
def buildDataTable(dataDir):
""" create single data sheet """
files = os.listdir(dataDir)
data = {}
for fName in files:
print 'Processing: ', fName
try:
df = DataFrame.from_csv(dataDir+'/'+fName)
code = fName.split('.')[0].split('_')[1]
month = '%02d' % codes[code[0]]
year = '20'+code[1:]
newCode = year+'_'+month
data[newCode] = df
except Exception as e:
print 'Could not process:', e
full = DataFrame()
for k,df in data.iteritems():
s = df['Settle']
s.name = k
s[s<5] = np.nan
if len(s.dropna())>0:
full = full.join(s,how='outer')
else:
print s.name, ': Empty dataset.'
full[full<5]=np.nan
full = full[sorted(full.columns)]
# use only data after this date
startDate = datetime.datetime(2008,1,1)
idx = full.index >= startDate
full = full.ix[idx,:]
#full.plot(ax=gca())
fName = os.path.expanduser('~')+'/twpData/vix_futures.csv'
print 'Saving to ', fName
full.to_csv(fName)
if __name__ == '__main__':
if not os.path.exists(dataDir):
print 'creating data directory %s' % dataDir
os.makedirs(dataDir)
for year in range(2008,2013):
for month in range(12):
print 'Getting data for {0}/{1}'.format(year,month+1)
saveVixFutureData(year,month,dataDir)
    print 'Raw data was saved to {0}'.format(dataDir)
buildDataTable(dataDir) | bsd-3-clause |
andaag/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 114 | 11393 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
ilo10/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
janhahne/nest-simulator | pynest/examples/Potjans_2014/helpers.py | 1 | 14760 | # -*- coding: utf-8 -*-
#
# helpers.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Pynest microcircuit helpers
---------------------------
Helper functions for the simulation and evaluation of the microcircuit.
Authors
~~~~~~~~
Hendrik Rothe, Hannah Bos, Sacha van Albada; May 2016
"""
import numpy as np
import os
import sys
if 'DISPLAY' not in os.environ:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
def compute_DC(net_dict, w_ext):
""" Computes DC input if no Poisson input is provided to the microcircuit.
Parameters
----------
net_dict
Parameters of the microcircuit.
w_ext
Weight of external connections.
Returns
-------
DC
DC input, which compensates lacking Poisson input.
"""
DC = (
net_dict['bg_rate'] * net_dict['K_ext'] *
w_ext * net_dict['neuron_params']['tau_syn_E'] * 0.001
)
return DC
def get_weight(PSP_val, net_dict):
""" Computes weight to elicit a change in the membrane potential.
This function computes the weight which elicits a change in the membrane
potential of size PSP_val. To implement this, the weight is calculated to
elicit a current that is high enough to implement the desired change in the
membrane potential.
Parameters
----------
PSP_val
Evoked postsynaptic potential.
net_dict
Dictionary containing parameters of the microcircuit.
Returns
-------
PSC_e
Weight value(s).
"""
C_m = net_dict['neuron_params']['C_m']
tau_m = net_dict['neuron_params']['tau_m']
tau_syn_ex = net_dict['neuron_params']['tau_syn_ex']
PSC_e_over_PSP_e = (((C_m) ** (-1) * tau_m * tau_syn_ex / (
tau_syn_ex - tau_m) * ((tau_m / tau_syn_ex) ** (
- tau_m / (tau_m - tau_syn_ex)) - (tau_m / tau_syn_ex) ** (
- tau_syn_ex / (tau_m - tau_syn_ex)))) ** (-1))
PSC_e = (PSC_e_over_PSP_e * PSP_val)
return PSC_e
def get_total_number_of_synapses(net_dict):
""" Returns the total number of synapses between all populations.
The first index (rows) of the output matrix is the target population
and the second (columns) the source population. If a scaling of the
synapses is intended this is done in the main simulation script and the
variable 'K_scaling' is ignored in this function.
Parameters
----------
net_dict
Dictionary containing parameters of the microcircuit.
N_full
Number of neurons in all populations.
number_N
Total number of populations.
conn_probs
Connection probabilities of the eight populations.
scaling
Factor that scales the number of neurons.
Returns
-------
K
Total number of synapses with
dimensions [len(populations), len(populations)].
"""
N_full = net_dict['N_full']
number_N = len(N_full)
conn_probs = net_dict['conn_probs']
scaling = net_dict['N_scaling']
prod = np.outer(N_full, N_full)
n_syn_temp = np.log(1. - conn_probs)/np.log((prod - 1.) / prod)
N_full_matrix = np.column_stack(
(N_full for i in list(range(number_N)))
)
# If the network is scaled the indegrees are calculated in the same
# fashion as in the original version of the circuit, which is
# written in sli.
K = (((n_syn_temp * (
N_full_matrix * scaling).astype(int)) / N_full_matrix).astype(int))
return K
def synapses_th_matrix(net_dict, stim_dict):
""" Computes number of synapses between thalamus and microcircuit.
    This function ignores the variable that scales the number of synapses.
    If scaling is intended, it is performed in the main simulation script.
Parameters
----------
net_dict
Dictionary containing parameters of the microcircuit.
stim_dict
Dictionary containing parameters of stimulation settings.
N_full
Number of neurons in the eight populations.
number_N
Total number of populations.
conn_probs
Connection probabilities of the thalamus to the eight populations.
scaling
Factor that scales the number of neurons.
T_full
Number of thalamic neurons.
Returns
-------
K
Total number of synapses.
"""
N_full = net_dict['N_full']
number_N = len(N_full)
scaling = net_dict['N_scaling']
conn_probs = stim_dict['conn_probs_th']
T_full = stim_dict['n_thal']
prod = (T_full * N_full).astype(float)
n_syn_temp = np.log(1. - conn_probs)/np.log((prod - 1.)/prod)
K = (((n_syn_temp * (N_full * scaling).astype(int))/N_full).astype(int))
return K
def adj_w_ext_to_K(K_full, K_scaling, w, w_from_PSP, DC, net_dict, stim_dict):
""" Adjustment of weights to scaling is performed.
The recurrent and external weights are adjusted to the scaling
of the indegrees. Extra DC input is added to compensate the scaling
and preserve the mean and variance of the input.
Parameters
----------
K_full
Total number of connections between the eight populations.
K_scaling
Scaling factor for the connections.
w
Weight matrix of the connections of the eight populations.
w_from_PSP
Weight of the external connections.
DC
DC input to the eight populations.
net_dict
Dictionary containing parameters of the microcircuit.
stim_dict
Dictionary containing stimulation parameters.
tau_syn_E
Time constant of the external postsynaptic excitatory current.
full_mean_rates
Mean rates of the eight populations in the full scale version.
K_ext
Number of external connections to the eight populations.
bg_rate
Rate of the Poissonian spike generator.
Returns
-------
w_new
Adjusted weight matrix.
w_ext_new
Adjusted external weight.
I_ext
Extra DC input.
"""
tau_syn_E = net_dict['neuron_params']['tau_syn_E']
full_mean_rates = net_dict['full_mean_rates']
w_mean = w_from_PSP
K_ext = net_dict['K_ext']
bg_rate = net_dict['bg_rate']
w_new = w / np.sqrt(K_scaling)
I_ext = np.zeros(len(net_dict['populations']))
x1_all = w * K_full * full_mean_rates
x1_sum = np.sum(x1_all, axis=1)
if net_dict['poisson_input']:
x1_ext = w_mean * K_ext * bg_rate
w_ext_new = w_mean / np.sqrt(K_scaling)
I_ext = 0.001 * tau_syn_E * (
(1. - np.sqrt(K_scaling)) * x1_sum + (
1. - np.sqrt(K_scaling)) * x1_ext) + DC
else:
w_ext_new = w_from_PSP / np.sqrt(K_scaling)
I_ext = 0.001 * tau_syn_E * (
(1. - np.sqrt(K_scaling)) * x1_sum) + DC
return w_new, w_ext_new, I_ext
def read_name(path, name):
""" Reads names and ids of spike detector.
The names of the spike detectors are gathered and the lowest and
highest id of each spike detector is computed. If the simulation was
run on several threads or mpi-processes, one name per spike detector
per mpi-process/thread is extracted.
Parameters
------------
path
Path where the spike detector files are stored.
name
Name of the spike detector.
Returns
-------
files
Name of all spike detectors, which are located in the path.
node_ids
Lowest and highest ids of the spike detectors.
"""
    # Import filenames
files = []
for file in os.listdir(path):
if file.endswith('.gdf') and file.startswith(name):
temp = file.split('-')[0] + '-' + file.split('-')[1]
if temp not in files:
files.append(temp)
# Import node IDs
node_idfile = open(path + 'population_nodeids.dat', 'r')
node_ids = []
for l in node_idfile:
a = l.split()
node_ids.append([int(a[0]), int(a[1])])
files = sorted(files)
return files, node_ids
def load_spike_times(path, name, begin, end):
""" Loads spike times of each spike detector.
Parameters
-----------
path
Path where the files with the spike times are stored.
name
Name of the spike detector.
begin
Lower boundary value to load spike times.
end
Upper boundary value to load spike times.
Returns
-------
data
Dictionary containing spike times in the interval from 'begin'
to 'end'.
"""
files, node_ids = read_name(path, name)
data = {}
for i in list(range(len(files))):
all_names = os.listdir(path)
temp3 = [
all_names[x] for x in list(range(len(all_names)))
if all_names[x].endswith('gdf') and
all_names[x].startswith('spike') and
(all_names[x].split('-')[0] + '-' + all_names[x].split('-')[1]) in
files[i]
]
data_temp = [np.loadtxt(os.path.join(path, f)) for f in temp3]
data_concatenated = np.concatenate(data_temp)
data_raw = data_concatenated[np.argsort(data_concatenated[:, 1])]
idx = ((data_raw[:, 1] > begin) * (data_raw[:, 1] < end))
data[i] = data_raw[idx]
return data
def plot_raster(path, name, begin, end):
""" Creates a spike raster plot of the microcircuit.
Parameters
-----------
path
Path where the spike times are stored.
name
Name of the spike detector.
begin
Initial value of spike times to plot.
end
Final value of spike times to plot.
Returns
-------
None
"""
files, node_ids = read_name(path, name)
data_all = load_spike_times(path, name, begin, end)
highest_node_id = node_ids[-1][-1]
node_ids_numpy = np.asarray(node_ids)
node_ids_numpy_changed = abs(node_ids_numpy - highest_node_id) + 1
L23_label_pos = (node_ids_numpy_changed[0][0] + node_ids_numpy_changed[1][1])/2
L4_label_pos = (node_ids_numpy_changed[2][0] + node_ids_numpy_changed[3][1])/2
L5_label_pos = (node_ids_numpy_changed[4][0] + node_ids_numpy_changed[5][1])/2
L6_label_pos = (node_ids_numpy_changed[6][0] + node_ids_numpy_changed[7][1])/2
ylabels = ['L23', 'L4', 'L5', 'L6']
color_list = [
'#000000', '#888888', '#000000', '#888888',
'#000000', '#888888', '#000000', '#888888'
]
Fig1 = plt.figure(1, figsize=(8, 6))
for i in list(range(len(files))):
times = data_all[i][:, 1]
neurons = np.abs(data_all[i][:, 0] - highest_node_id) + 1
plt.plot(times, neurons, '.', color=color_list[i])
plt.xlabel('time [ms]', fontsize=18)
plt.xticks(fontsize=18)
plt.yticks(
[L23_label_pos, L4_label_pos, L5_label_pos, L6_label_pos],
ylabels, rotation=10, fontsize=18
)
plt.savefig(os.path.join(path, 'raster_plot.png'), dpi=300)
plt.show()
def fire_rate(path, name, begin, end):
""" Computes firing rate and standard deviation of it.
The firing rate of each neuron for each population is computed and stored
in a numpy file in the directory of the spike detectors. The mean firing
rate and its standard deviation is displayed for each population.
Parameters
-----------
path
Path where the spike times are stored.
name
Name of the spike detector.
begin
Initial value of spike times to calculate the firing rate.
end
Final value of spike times to calculate the firing rate.
Returns
-------
None
"""
files, node_ids = read_name(path, name)
data_all = load_spike_times(path, name, begin, end)
rates_averaged_all = []
rates_std_all = []
for h in list(range(len(files))):
n_fil = data_all[h][:, 0]
n_fil = n_fil.astype(int)
count_of_n = np.bincount(n_fil)
count_of_n_fil = count_of_n[node_ids[h][0]-1:node_ids[h][1]]
rate_each_n = count_of_n_fil * 1000. / (end - begin)
rate_averaged = np.mean(rate_each_n)
rate_std = np.std(rate_each_n)
rates_averaged_all.append(float('%.3f' % rate_averaged))
rates_std_all.append(float('%.3f' % rate_std))
np.save(os.path.join(path, ('rate' + str(h) + '.npy')), rate_each_n)
print('Mean rates: %r Hz' % rates_averaged_all)
print('Standard deviation of rates: %r Hz' % rates_std_all)
def boxplot(net_dict, path):
""" Creates a boxblot of the firing rates of the eight populations.
To create the boxplot, the firing rates of each population need to be
computed with the function 'fire_rate'.
Parameters
-----------
net_dict
Dictionary containing parameters of the microcircuit.
path
Path were the firing rates are stored.
Returns
-------
None
"""
pops = net_dict['N_full']
reversed_order_list = list(range(len(pops) - 1, -1, -1))
list_rates_rev = []
for h in reversed_order_list:
list_rates_rev.append(
np.load(os.path.join(path, ('rate' + str(h) + '.npy')))
)
pop_names = net_dict['populations']
label_pos = list(range(len(pops), 0, -1))
color_list = ['#888888', '#000000']
medianprops = dict(linestyle='-', linewidth=2.5, color='firebrick')
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(list_rates_rev, 0, 'rs', 0, medianprops=medianprops)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
for h in list(range(len(pops))):
boxX = []
boxY = []
box = bp['boxes'][h]
for j in list(range(5)):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
boxCoords = list(zip(boxX, boxY))
k = h % 2
boxPolygon = Polygon(boxCoords, facecolor=color_list[k])
ax1.add_patch(boxPolygon)
plt.xlabel('firing rate [Hz]', fontsize=18)
plt.yticks(label_pos, pop_names, fontsize=18)
plt.xticks(fontsize=18)
plt.savefig(os.path.join(path, 'box_plot.png'), dpi=300)
plt.show()
| gpl-2.0 |
jreback/pandas | pandas/tests/tseries/offsets/test_business_day.py | 1 | 14570 | """
Tests for offsets.BDay
"""
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs.offsets import ApplyTypeError, BDay, BMonthEnd, CDay
from pandas.compat.numpy import np_datetime64_compat
from pandas import DatetimeIndex, _testing as tm, read_pickle
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class TestBusinessDay(Base):
_offset = BDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset1 = self.offset
self.offset2 = BDay(2)
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<BusinessDay>"
assert repr(self.offset2) == "<2 * BusinessDays>"
expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def test_with_offset_index(self):
dti = DatetimeIndex([self.d])
result = dti + (self.offset + timedelta(hours=2))
expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
tm.assert_index_equal(result, expected)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 1, 3)
def testRollback1(self):
assert BDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert BDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
def testRollforward1(self):
assert BDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert BDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
def test_roll_date_object(self):
offset = BDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
def test_is_on_offset(self):
tests = [
(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False),
]
for offset, d, expected in tests:
assert_is_on_offset(offset, d, expected)
apply_cases: _ApplyCases = [
(
BDay(),
{
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8),
},
),
(
2 * BDay(),
{
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9),
},
),
(
-BDay(),
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7),
},
),
(
-2 * BDay(),
{
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7),
},
),
(
BDay(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + BDay(10)
assert result == datetime(2012, 11, 6)
result = dt + BDay(100) - BDay(100)
assert result == dt
off = BDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
off = BDay() * 10
rs = datetime(2014, 1, 5) + off # see #5890
xp = datetime(2014, 1, 17)
assert rs == xp
def test_apply_corner(self):
msg = "Only know how to combine business day with datetime or timedelta"
with pytest.raises(ApplyTypeError, match=msg):
BDay().apply(BMonthEnd())
class TestCustomBusinessDay(Base):
_offset = CDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.nd = np_datetime64_compat("2008-01-01 00:00:00Z")
self.offset = CDay()
self.offset1 = self.offset
self.offset2 = CDay(2)
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessDay>"
assert repr(self.offset2) == "<2 * CustomBusinessDays>"
expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def test_with_offset_index(self):
dti = DatetimeIndex([self.d])
result = dti + (self.offset + timedelta(hours=2))
expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
tm.assert_index_equal(result, expected)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 1, 3)
assert self.offset2(self.nd) == datetime(2008, 1, 3)
def testRollback1(self):
assert CDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert CDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
def testRollforward1(self):
assert CDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert CDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
def test_roll_date_object(self):
offset = CDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CDay(), datetime(2008, 1, 1), True),
(CDay(), datetime(2008, 1, 5), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, d, expected = case
assert_is_on_offset(offset, d, expected)
apply_cases: _ApplyCases = [
(
CDay(),
{
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8),
},
),
(
2 * CDay(),
{
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9),
},
),
(
-CDay(),
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7),
},
),
(
-2 * CDay(),
{
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7),
},
),
(
CDay(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CDay(10)
assert result == datetime(2012, 11, 6)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
def test_apply_corner(self):
msg = (
"Only know how to combine trading day "
"with datetime, datetime64 or timedelta"
)
with pytest.raises(ApplyTypeError, match=msg):
CDay().apply(BMonthEnd())
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
tday = CDay(holidays=holidays)
for year in range(2012, 2015):
dt = datetime(year, 4, 30)
xp = datetime(year, 5, 2)
rs = dt + tday
assert rs == xp
def test_weekmask(self):
weekmask_saudi = "Sat Sun Mon Tue Wed" # Thu-Fri Weekend
weekmask_uae = "1111001" # Fri-Sat Weekend
weekmask_egypt = [1, 1, 1, 1, 0, 0, 1] # Fri-Sat Weekend
bday_saudi = CDay(weekmask=weekmask_saudi)
bday_uae = CDay(weekmask=weekmask_uae)
bday_egypt = CDay(weekmask=weekmask_egypt)
dt = datetime(2013, 5, 1)
xp_saudi = datetime(2013, 5, 4)
xp_uae = datetime(2013, 5, 2)
xp_egypt = datetime(2013, 5, 2)
assert xp_saudi == dt + bday_saudi
assert xp_uae == dt + bday_uae
assert xp_egypt == dt + bday_egypt
xp2 = datetime(2013, 5, 5)
assert xp2 == dt + 2 * bday_saudi
assert xp2 == dt + 2 * bday_uae
assert xp2 == dt + 2 * bday_egypt
def test_weekmask_and_holidays(self):
weekmask_egypt = "Sun Mon Tue Wed Thu" # Fri-Sat Weekend
holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime(2013, 4, 30)
xp_egypt = datetime(2013, 5, 5)
assert xp_egypt == dt + 2 * bday_egypt
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_calendar(self):
calendar = USFederalHolidayCalendar()
dt = datetime(2014, 1, 17)
assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self.offset)
_check_roundtrip(self.offset2)
_check_roundtrip(self.offset * 2)
def test_pickle_compat_0_14_1(self, datapath):
hdays = [datetime(2013, 1, 1) for ele in range(4)]
pth = datapath("tseries", "offsets", "data", "cday-0.14.1.pickle")
cday0_14_1 = read_pickle(pth)
cday = CDay(holidays=hdays)
assert cday == cday0_14_1
| bsd-3-clause |
deepzot/bashes | examples/g3post.py | 1 | 3853 | #!/usr/bin/env python
import argparse
import os.path
import numpy as np
import matplotlib.pyplot as plt
import galsim
import bashes
def main():
# Parse command-line args.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--load', type = str, default = 'bashes',
help = 'base filename of g3bashes job we will post process')
parser.add_argument('--grid', type = int, default = 0,
help = 'number of stamp/prior combinations to show as a grid of plots')
parser.add_argument('--verbose', action = 'store_true',
help = 'be verbose about progress')
parser.add_argument('--save', type = str, default = None,
        help = 'base name for saving output')
args = parser.parse_args()
# Load the config data for the g3bashes job we are post processing.
config = bashes.config.load(args.load)
saveBase = config['args']['save']
# Reconstruct the shear grid used by the estimator.
ng = config['args']['ng']
gmax = config['args']['gmax']
dg = np.linspace(-gmax,+gmax,ng)
g1 = config['args']['g1_center'] + dg
g2 = config['args']['g2_center'] + dg
# Prepare the shear grid edges needed by pcolormesh.
g1e,g2e = bashes.utility.getBinEdges(g1,g2)
# Initialize matplotlib.
if args.grid:
gridFig = plt.figure('fig1',figsize=(12,12))
gridFig.set_facecolor('white')
plt.subplots_adjust(left=0.02,bottom=0.02,right=0.98,top=0.98,wspace=0.05,hspace=0.05)
# Allocate memory for the full NLL grid over all priors.
nstamps = config['args']['nstamps']
nll = np.empty((ng*ng,nstamps,nstamps))
# Load this array from the NLL grids saved for each prior.
for iprior in range(nstamps):
# Load the estimator results for this prior.
loadName = '%s_%d.npy' % (saveBase,iprior)
if not os.path.exists(loadName):
            print 'Skipping missing results for prior %d in %r' % (iprior, loadName)
continue
nll[:,:,iprior] = np.load(loadName)
# Marginalize over priors for each data stamp.
nllData = np.empty((ng*ng,nstamps))
for idata in range(nstamps):
for ig in range(ng*ng):
nllData[ig,idata] = bashes.Estimator.marginalize(nll[ig,idata])
# Sum the NLL over data stamps, assuming that the same constant shear is applied.
nllTotal = np.sum(nllData,axis=1)
nllTotalMin = np.min(nllTotal)
# Define a shear plot helper.
nllLevels = bashes.utility.getDeltaChiSq()
def plotShearNLL(nll):
nllShear = nll.reshape((ng,ng))
nllShearMin = np.min(nllShear)
plt.pcolormesh(g1e,g2e,nllShear,cmap='rainbow',vmin=0.,vmax=nllLevels[-1])
plt.contour(g1,g2,nllShear,levels=nllLevels,colors='w',linestyles=('-','--',':'))
# Remove tick labels.
axes = plt.gca()
axes.xaxis.set_ticklabels([])
axes.yaxis.set_ticklabels([])
# Draw a grid of shear NLL values if requested.
if args.grid:
# Show the shear grid for each stamp,prior pair.
for iprior in range(args.grid):
for idata in range(args.grid):
plt.subplot(args.grid+1,args.grid+1,iprior*(args.grid+1)+idata+1)
plotShearNLL(nll[:,idata,iprior]-np.min(nllData[:,idata]))
# Show the shear grid marginalized over priors for each data stamp.
for idata in range(args.grid):
plt.subplot(args.grid+1,args.grid+1,args.grid*(args.grid+1)+idata+1)
plotShearNLL(nllData[:,idata]-np.min(nllData[:,idata]))
# Show the combined NLL assuming constant shear.
plt.subplot(args.grid+1,args.grid+1,(args.grid+1)**2)
plotShearNLL(nllTotal-nllTotalMin)
if args.save:
plt.savefig(args.save)
plt.show()
if __name__ == '__main__':
main()
| mit |
numenta/NAB | nab/detectors/numenta/nab/runner.py | 2 | 3981 | # ----------------------------------------------------------------------
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import multiprocessing
import os
import pandas
try:
import simplejson as json
except ImportError:
import json
from nab.corpus import Corpus
from nab.detectors.base import detectDataSet
from nab.labeler import CorpusLabel
class Runner(object):
"""
Class to run detection on the NAB benchmark using the specified set of
profiles and/or detectors.
"""
def __init__(self,
dataDir,
resultsDir,
labelPath,
profilesPath,
numCPUs=None):
"""
@param dataDir (string) Directory where all the raw datasets exist.
@param resultsDir (string) Directory where the detector anomaly scores
                                      will be stored.
@param labelPath (string) Path where the labels of the datasets
exist.
@param profilesPath (string) Path to JSON file containing application
profiles and associated cost matrices.
@param numCPUs (int) Number of CPUs to be used for calls to
multiprocessing.pool.map
"""
self.dataDir = dataDir
self.resultsDir = resultsDir
self.labelPath = labelPath
self.profilesPath = profilesPath
self.pool = multiprocessing.Pool(numCPUs)
self.probationaryPercent = 0.15
self.windowSize = 0.10
self.corpus = None
self.corpusLabel = None
self.profiles = None
def initialize(self):
"""Initialize all the relevant objects for the run."""
self.corpus = Corpus(self.dataDir)
self.corpusLabel = CorpusLabel(path=self.labelPath, corpus=self.corpus)
with open(self.profilesPath) as p:
self.profiles = json.load(p)
def detect(self, detectors):
"""Generate results file given a dictionary of detector classes
Function that takes a set of detectors and a corpus of data and creates a
set of files storing the alerts and anomaly scores given by the detectors
@param detectors (dict) Dictionary with key value pairs of a
detector name and its corresponding
class constructor.
"""
print "\nRunning detection step"
count = 0
args = []
for detectorName, detectorConstructor in detectors.iteritems():
for relativePath, dataSet in self.corpus.dataFiles.iteritems():
if self.corpusLabel.labels.has_key(relativePath):
args.append(
(
count,
detectorConstructor(
dataSet=dataSet,
probationaryPercent=self.probationaryPercent),
detectorName,
self.corpusLabel.labels[relativePath]["label"],
self.resultsDir,
relativePath
)
)
count += 1
# Using `map_async` instead of `map` so interrupts are properly handled.
# See: http://stackoverflow.com/a/1408476
self.pool.map_async(detectDataSet, args).get(999999)
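# A hypothetical usage sketch (not part of NAB itself; the detector import path and
# the data/label/profile locations below are assumptions):
#
#   from nab.detectors.numenta.numenta_detector import NumentaDetector
#
#   runner = Runner(dataDir="data",
#                   resultsDir="results",
#                   labelPath="labels/combined_windows.json",
#                   profilesPath="config/profiles.json")
#   runner.initialize()
#   runner.detect({"numenta": NumentaDetector})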
| agpl-3.0 |
Koheron/zynq-sdk | examples/alpha250/adc-dac-dma/test.py | 1 | 2163 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
from koheron import command, connect
import matplotlib.pyplot as plt
import numpy as np
class AdcDacDma(object):
def __init__(self, client):
self.n = 8*1024*1024
self.client = client
self.dac = np.zeros((self.n))
self.adc = np.zeros((self.n))
@command()
def select_adc_channel(self, channel):
pass
@command()
def set_dac_data(self, data):
pass
def set_dac(self, warning=False, reset=False):
if warning:
if np.max(np.abs(self.dac)) >= 1:
print('WARNING : dac out of bounds')
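        # Map the normalized float samples to unsigned integer DAC codes and pack two
        # consecutive samples into each 32-bit word expected by set_dac_data().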
dac_data = np.uint32(np.mod(np.floor(32768 * self.dac) + 32768, 65536) + 32768)
self.set_dac_data(dac_data[::2] + 65536 * dac_data[1::2])
@command()
def start_dma(self):
pass
@command()
def stop_dma(self):
pass
@command()
def get_adc_data(self):
return self.client.recv_array(self.n/2, dtype='uint32')
def get_adc(self):
data = self.get_adc_data()
self.adc[::2] = (np.int32(data % 65536) - 32768) % 65536 - 32768
self.adc[1::2] = (np.int32(data >> 16) - 32768) % 65536 - 32768
if __name__=="__main__":
host = os.getenv('HOST','192.168.1.16')
client = connect(host, name='adc-dac-dma')
driver = AdcDacDma(client)
adc_channel = 0
driver.select_adc_channel(adc_channel)
fs = 250e6
fmin = 1e3 # Hz
fmax = 1e6 # Hz
t = np.arange(driver.n) / fs
chirp = (fmax-fmin)/(t[-1]-t[0])
print("Set DAC waveform (chirp between {} and {} MHz)".format(1e-6*fmin, 1e-6*fmax))
driver.dac = 0.9 * np.cos(2*np.pi * (fmin + chirp * t) * t)
driver.set_dac()
fs = 250e6
n_avg = 10
adc = np.zeros(driver.n)
print("Get ADC{} data ({} points)".format(adc_channel, driver.n))
driver.start_dma()
driver.get_adc()
driver.stop_dma()
n_pts = 1000000
print("Plot first {} points".format(n_pts))
plt.plot(1e6 * t[0:n_pts], driver.adc[0:n_pts])
plt.ylim((-2**15, 2**15))
plt.xlabel('Time (us)')
plt.ylabel('ADC Raw data')
plt.show() | mit |
procoder317/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
theislab/scanpy | scanpy/plotting/_preprocessing.py | 1 | 4222 | from typing import Optional, Union
import numpy as np
import pandas as pd
from matplotlib import pyplot as pl
from matplotlib import rcParams
from anndata import AnnData
from . import _utils
# --------------------------------------------------------------------------------
# Plot result of preprocessing functions
# --------------------------------------------------------------------------------
def highly_variable_genes(
adata_or_result: Union[AnnData, pd.DataFrame, np.recarray],
log: bool = False,
show: Optional[bool] = None,
save: Union[bool, str, None] = None,
highly_variable_genes: bool = True,
):
"""Plot dispersions or normalized variance versus means for genes.
Produces Supp. Fig. 5c of Zheng et al. (2017) and MeanVarPlot() and
VariableFeaturePlot() of Seurat.
Parameters
----------
    adata_or_result
Result of :func:`~scanpy.pp.highly_variable_genes`.
log
Plot on logarithmic axes.
show
Show the plot, do not return axis.
save
If `True` or a `str`, save the figure.
A string is appended to the default filename.
Infer the filetype if ending on {{`'.pdf'`, `'.png'`, `'.svg'`}}.
"""
if isinstance(adata_or_result, AnnData):
result = adata_or_result.var
seurat_v3_flavor = adata_or_result.uns["hvg"]["flavor"] == "seurat_v3"
else:
result = adata_or_result
if isinstance(result, pd.DataFrame):
seurat_v3_flavor = "variances_norm" in result.columns
else:
seurat_v3_flavor = False
if highly_variable_genes:
gene_subset = result.highly_variable
else:
gene_subset = result.gene_subset
means = result.means
if seurat_v3_flavor:
var_or_disp = result.variances
var_or_disp_norm = result.variances_norm
else:
var_or_disp = result.dispersions
var_or_disp_norm = result.dispersions_norm
size = rcParams['figure.figsize']
pl.figure(figsize=(2 * size[0], size[1]))
pl.subplots_adjust(wspace=0.3)
for idx, d in enumerate([var_or_disp_norm, var_or_disp]):
pl.subplot(1, 2, idx + 1)
for label, color, mask in zip(
['highly variable genes', 'other genes'],
['black', 'grey'],
[gene_subset, ~gene_subset],
):
if False:
means_, var_or_disps_ = np.log10(means[mask]), np.log10(d[mask])
else:
means_, var_or_disps_ = means[mask], d[mask]
pl.scatter(means_, var_or_disps_, label=label, c=color, s=1)
if log: # there's a bug in autoscale
pl.xscale('log')
pl.yscale('log')
y_min = np.min(var_or_disp)
y_min = 0.95 * y_min if y_min > 0 else 1e-1
pl.xlim(0.95 * np.min(means), 1.05 * np.max(means))
pl.ylim(y_min, 1.05 * np.max(var_or_disp))
if idx == 0:
pl.legend()
pl.xlabel(('$log_{10}$ ' if False else '') + 'mean expressions of genes')
data_type = 'dispersions' if not seurat_v3_flavor else 'variances'
pl.ylabel(
('$log_{10}$ ' if False else '')
+ '{} of genes'.format(data_type)
+ (' (normalized)' if idx == 0 else ' (not normalized)')
)
_utils.savefig_or_show('filter_genes_dispersion', show=show, save=save)
if show is False:
return pl.gca()
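# A hypothetical usage sketch (assumes the public scanpy aliases sc.pp/sc.pl; not part
# of this module):
#
#   import scanpy as sc
#   adata = sc.datasets.pbmc3k()        # any AnnData object with counts
#   sc.pp.normalize_total(adata)
#   sc.pp.log1p(adata)
#   sc.pp.highly_variable_genes(adata)
#   sc.pl.highly_variable_genes(adata)  # calls the function defined above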
# backwards compat
def filter_genes_dispersion(
result: np.recarray,
log: bool = False,
show: Optional[bool] = None,
save: Union[bool, str, None] = None,
):
"""\
Plot dispersions versus means for genes.
Produces Supp. Fig. 5c of Zheng et al. (2017) and MeanVarPlot() of Seurat.
Parameters
----------
result
Result of :func:`~scanpy.pp.filter_genes_dispersion`.
log
Plot on logarithmic axes.
show
Show the plot, do not return axis.
save
If `True` or a `str`, save the figure.
A string is appended to the default filename.
Infer the filetype if ending on {{`'.pdf'`, `'.png'`, `'.svg'`}}.
"""
highly_variable_genes(
result, log=log, show=show, save=save, highly_variable_genes=False
)
| bsd-3-clause |
mgraupe/acq4 | acq4/pyqtgraph/exporters/Matplotlib.py | 39 | 4821 | from ..Qt import QtGui, QtCore
from .Exporter import Exporter
from .. import PlotItem
from .. import functions as fn
__all__ = ['MatplotlibExporter']
"""
It is helpful when using the matplotlib Exporter if your
.matplotlib/matplotlibrc file is configured appropriately.
The following are suggested for getting usable PDF output that
can be edited in Illustrator, etc.
backend : Qt4Agg
text.usetex : True # Assumes you have a findable LaTeX installation
interactive : False
font.family : sans-serif
font.sans-serif : 'Arial' # (make first in list)
mathtext.default : sf
figure.facecolor : white # personal preference
# next setting allows pdf font to be readable in Adobe Illustrator
pdf.fonttype : 42 # set fonts to TrueType (otherwise it will be 3
                     # and the text will be vectorized).
text.dvipnghack : True # primarily to clean up font appearance on Mac
The advantage is that there is less to do to get an exported file cleaned and ready for
publication. Fonts are not vectorized (outlined), and window colors are white.
"""
class MatplotlibExporter(Exporter):
Name = "Matplotlib Window"
windows = []
def __init__(self, item):
Exporter.__init__(self, item)
def parameters(self):
return None
def cleanAxes(self, axl):
if type(axl) is not list:
axl = [axl]
for ax in axl:
if ax is None:
continue
for loc, spine in ax.spines.iteritems():
if loc in ['left', 'bottom']:
pass
elif loc in ['right', 'top']:
spine.set_color('none')
# do not draw the spine
else:
raise ValueError('Unknown spine location: %s' % loc)
# turn off ticks when there is no spine
ax.xaxis.set_ticks_position('bottom')
def export(self, fileName=None):
if isinstance(self.item, PlotItem):
mpw = MatplotlibWindow()
MatplotlibExporter.windows.append(mpw)
stdFont = 'Arial'
fig = mpw.getFigure()
# get labels from the graphic item
xlabel = self.item.axes['bottom']['item'].label.toPlainText()
ylabel = self.item.axes['left']['item'].label.toPlainText()
title = self.item.titleLabel.text
ax = fig.add_subplot(111, title=title)
ax.clear()
self.cleanAxes(ax)
#ax.grid(True)
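            # Translate each pyqtgraph curve's pen, symbol and brush options into the
            # corresponding matplotlib plot()/fill_between() keyword arguments
            # (pyqtgraph colours are 0-255 tuples, matplotlib expects 0-1 floats).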
for item in self.item.curves:
x, y = item.getData()
opts = item.opts
pen = fn.mkPen(opts['pen'])
if pen.style() == QtCore.Qt.NoPen:
linestyle = ''
else:
linestyle = '-'
color = tuple([c/255. for c in fn.colorTuple(pen.color())])
symbol = opts['symbol']
if symbol == 't':
symbol = '^'
symbolPen = fn.mkPen(opts['symbolPen'])
symbolBrush = fn.mkBrush(opts['symbolBrush'])
markeredgecolor = tuple([c/255. for c in fn.colorTuple(symbolPen.color())])
markerfacecolor = tuple([c/255. for c in fn.colorTuple(symbolBrush.color())])
markersize = opts['symbolSize']
if opts['fillLevel'] is not None and opts['fillBrush'] is not None:
fillBrush = fn.mkBrush(opts['fillBrush'])
fillcolor = tuple([c/255. for c in fn.colorTuple(fillBrush.color())])
ax.fill_between(x=x, y1=y, y2=opts['fillLevel'], facecolor=fillcolor)
pl = ax.plot(x, y, marker=symbol, color=color, linewidth=pen.width(),
linestyle=linestyle, markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor,
markersize=markersize)
xr, yr = self.item.viewRange()
ax.set_xbound(*xr)
ax.set_ybound(*yr)
ax.set_xlabel(xlabel) # place the labels.
ax.set_ylabel(ylabel)
mpw.draw()
else:
raise Exception("Matplotlib export currently only works with plot items")
MatplotlibExporter.register()
class MatplotlibWindow(QtGui.QMainWindow):
def __init__(self):
from ..widgets import MatplotlibWidget
QtGui.QMainWindow.__init__(self)
self.mpl = MatplotlibWidget.MatplotlibWidget()
self.setCentralWidget(self.mpl)
self.show()
def __getattr__(self, attr):
return getattr(self.mpl, attr)
def closeEvent(self, ev):
MatplotlibExporter.windows.remove(self)
| mit |
yihaochen/FLASHtools | grid_analysis/gridanalysis_entropy_ratio.py | 1 | 2327 | #!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import os.path as op
import util
import yt
import MPI_taskpull2
from yt_cluster_ratio_fields import *
yt.mylog.setLevel("ERROR")
# Scan for files
dirs = ['/home/ychen/data/0only_1022_h1_10Myr/']
regex = 'MHD_Jet*_hdf5_plt_cnt_????'
#regex = 'MHD_Jet*_hdf5_plt_cnt_[0-9][0-9][0-9][0-9]'
ratios = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
#ratios = [0.6]
def rescan(dir, printlist=False):
files = util.scan_files(dir, regex=regex, walk=True, reverse=False)
return files
def worker_fn(dirname, filepath):
ds = yt.load(filepath)
center = [0.0,0.0,0.0]
radius = (200, 'kpc')
sp = ds.sphere(center, radius)
masses = []
for ratio in ratios:
low_entropy = sp.cut_region(["obj['entropy_ratio'] < %.1f" % ratio])
masses.append(sum(low_entropy['cell_mass'].in_units('Msun')))
return (int(ds.basename[-4:]), ds.current_time.in_units('Myr')) + tuple(masses)
def tasks_gen(dirs):
for dir in dirs:
files = rescan(dir)
for file in reversed(files[:]):
yield file.pathname, file.fullpath
tasks = tasks_gen(dirs)
results = MPI_taskpull2.taskpull(worker_fn, tasks, print_result=True)
if results:
collected = {}
for key, item in results.items():
dirname, fname = key
if 'restart' in dirname:
dirname = op.dirname(dirname) + '/'
if dirname in collected:
collected[dirname].append(item)
else:
collected[dirname] = [item]
for key, item in collected.items():
collected[key] = sorted(item)
#picklename = time.strftime("Bflux_table_%Y%m%d_%H%M%S.pickle")
#pickle.dump(collected, open( picklename, "wb" ))
fmt = '%04d %6.3f' + ' %e'*len(ratios)
    header = 'filenumber, t(Myr)' + ', mass(Msun)' * len(ratios)
for dirname in collected.keys():
#print np.asarray(collected[dirname])
if dirname.strip('/').split('/')[-1] == 'data':
savedir = op.dirname(dirname)
else:
savedir = dirname
        np.savetxt(op.join(savedir, 'gridanalysis_entropy_ratio.txt'), np.asarray(collected[dirname]), fmt=fmt, header=header)
#if MPI_taskpull2.rank == 0:
# for key, item in results.items():
# print item
| gpl-2.0 |
bbcdli/xuexi | fenlei_tf/script_2019Nov/src/version1/tensor_train.py | 2 | 92004 | # originally by Hamed, 25Apr.2016
# hy:Changes by Haiyan, 21Dec.2016 v0.45
# sudo apt-get install python-h5py
# Added evaluation function for multiple models, their result file names contain calculated mAP.
# Added functionality to set different dropout rate for each layer for 3conv net
# Moved auxiliary functions to a new file tools.py
# Added function to obtain images of estimated receptive fields/active fields
# Added function to save all models and specified names according to training status
# Added graph 3conv, 4conv
# Added real batch training functionality
# Added functionality of feeding a tensor name
# Added function to save tensorflow models with max precision for a class, not overwritten by following data
# Added function do_crop2_parts to get parts in different sizes
# Added function for displaying evaluation results in a worksheet (result_for_table = 0).
# Added similarity.py to analyse similarity between classes, CAD sampls and camera test images
# Created tensor_cnn_evaluate.py. It is used for testing multiple models. Input of each evaluation function includes:
# session,num_class,img_list,_labels
# Added stop condition to avoid overfitting
# Added function to load two models of different graphs. requirement: install tensorflow version > 0.8, numpy > 1.11.2
# Added display of all defined results for training, validation and test in one graph in tensorboard
# Added optimizer Adam and its parameters
# Added display of test result in RETRAIN
# Added a function to add more training data during a training. This data contains random noise.
# Added display of test result in CONTINUE_TRAIN. Some new variables are created for tensorflow for this purpose.
# Created a function for importing data, import_data(). This is used for displaying test result parallel to validation result.
# Added function to evaluate two models of same graph
# Added adaptive testing - evaluate_image_vague, create_test_slices to get top,bottom, left, right, center parts of a test image
# Added formula for calculating window size when webcam is used, also for rectangular form
# Added functions: random crop, random rotation, set scale, remove small object area
# Added def convert_result for converting sub-class to main-class result.
# Changed tensorboard backup path and added sub-folder to store tensorboard logs so that the logs can be compared easily.
# Changed model name to include specification info of a model.
# Specification information of a model such as number of hidden layers and tensor size must be set as the same when this model is reused later.
# Added functionality of continuing a broken training
# Added distortion tools for automatically generating and moving/removing data
# Added tensorboard log timestamp for comparing different model in live time, changed tensorboard log path
# Added function to do tracking based on mean shift
# Added date time for log
# Training set: CAD samples for all six classes
# Added functionality of saving first convolutional layer feature output in training phase and test phase
# Added function to evaluate model with webcam
# Prepare_list is activated according to action selected for training or test
# Test set: lego positive samples for all six classes
# Added output info: when evaluating with images, proportion of correctly classified is included
# Added sequence configurations for based on training or test which is selected
# Added function to save correctly classified images/frames
# Added function to save misclassified images to folder ../MisClassifed, upper limit can be set
# Added log function, time count for training duration
# Test_Images: stored under ../Test_Images, they are lego positive samples that are not included in training set.
# Added the functionality to evaluate model with images
# Changed prepare_list to a global function to make test run smoothly.
# Changed condition for label, predict
# Changed display precision of matrix outputs to 2
# Added a formula to calculate shape, in settings.py
# Added a formula to set cropped frame to show ROI in demo
# Tested video_crop_tool.py, it does not require strict parameter for width as in this script
# Added global variables for width, height, crop sizes, defined in settings.py
# Changed places to adapt to lego data
# - All file paths in tensor_cnn_video.py, prepare_list.py, image_distortions.py, test.py
# - LABELS(=6), which is the number of sub-folders under ../Data
# To see tensorflow output use following command
# $tensorflow --logdir='enter_the_path_of_tensorboard_log'
#####################################################################################################
import Image
import ImageFilter
from functools import wraps
from random import randint
import time
import datetime
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import PIL
from sklearn import datasets
from scipy import ndimage
import math
import operator
import imutils
from PIL import Image # hy: create video with images
import settings # hy: collection of global variables
import prep_image
import tools
# activate global var
settings.set_global()
start_time = time.time()
# http://lvdmaaten.github.io/tsne/ visualization
## Train or Evaluation
############################################################
RETRAIN = True
current_step = 141 # 4311, 4791,1211, 3271, 3491, 21291 72.4 model_60_h18_w18_c8-79302-top
# Network Parameters
#learning_rate = 0.02509 # 0.03049 #0.015 #0.07297 #0.09568# TODO 0.05 0.005 better, 0.001 good \0.02, 0.13799 to 0.14 good for 6 classes,
# #0.13999 (flat) to 0.13999 (gap) for 7 classes, 0.0699965 for 6 classes with fine samples
# 0.0035 for links+rechts 98%;
#n_hidden = 360 # 162*6 # 128
# 300: horizontal 20%
# 360: until 1200 step good, after that test acc remains
# 200: start to increase early, 200, but does not increase lot any more
# 150, 250, 300, 330, 400: until 70 iter 17%
# Select architecture
Graph_2conv = 0
Graph_3conv = 1
Graph_3conv_same_dropout = 0
Graph_4conv = 0
if Graph_2conv == 1:
arch_str = '2conv'
if Graph_3conv == 1 or Graph_3conv_same_dropout == 1:
arch_str = '3conv'
#if Graph_3conv == 1:
#dropout = [0.25, 0.25, 0.25, 0.25] # 3,4,5,5
#dropout = [0.25] #3,4,5,5
#dropout_1s = [1] * len(dropout)
# dropout = 0.5 # Dropout, probability to keep units
if Graph_4conv == 1:
arch_str = '4conv'
save_all_models = 1
act_min = 0.80
act_max = 0.93
add_data = 0 # initial
area_step_size_webcam = 20 # 479 #200
#optimizer_type = 'GD' # 'adam' #GD-'gradient.descent'
set_STOP = False
stop_loss = 7000.8 # 1.118
stop_train_loss_increase_rate = 70000.08 # 1.01
stop_acc_diff = 5 # 3
stop_acc = 1 # 0.7
last_best_acc = 0
last_best_test_acc = 0
last_loss = 100
CONTINUE_TRAIN = True
GENERATE_FILELIST = 1
log_on = True
DEBUG = 1
TrainingProp = 0.70
###########################################################################################################
# the remaining functions are also located separately in the *evaluation* file; they are updated only occasionally.
###########################################################################################################
TEST_with_Webcam = False # hy True - test with webcam
video_label = 0 # hy: initialize/default 0:hinten 1:links 2:'oben/', 3:'rechts/', '4: unten/', 5 'vorn/
TEST_with_Images = False # hy True - test with images
TEST_with_Video = False # hy True - test with video
video_window_scale = 2
TEST_CONV_OUTPUT = False
result_for_table = 0
SAVE_Misclassified = 0
SAVE_CorrectClassified = 0
# Input data
# n_input = 42 * 42 # Cifar data input (img shape: 32*32)
n_input = settings.h_resize * settings.w_resize # hy
n_classes = len(settings.LABELS) # hy: adapt to lego composed of 6 classes. Cifar10 total classes (0-9 digits)
# Noise level
noise_level = 0
# Data
LABEL_LIST = settings.data_label_file
LABEL_PATH = settings.data_label_path
LABEL_LIST_TEST = settings.test_label_file
LABEL_PATH_TEST = settings.test_label_path
LABELS = settings.LABELS # hy
LABEL_names = settings.LABEL_names # hy
# Active fields test for visualization
do_active_fields_test = 0
if do_active_fields_test == 1:
print 'To get active fields analysis you must set read_images to sorted read'
LABEL_PATH_TEST = "../Test_Images/test_active_fields/*/*" #
LABEL_LIST_TEST = settings.test_label_file_a
activation_test_img_name = '../Test_Images/hinten_ori1_rz400.jpg'
# auto-switches #########################
if RETRAIN or TEST_with_Images or TEST_with_Webcam or TEST_with_Video:
CONTINUE_TRAIN = False
if RETRAIN or CONTINUE_TRAIN:
TEST_with_Images = False
TEST_with_Webcam = False
TEST_with_Video = False
do_active_fields_test = 0
#########################################
# hy:add timestamp to tensor log files
from datetime import datetime
tensorboard_path = '../Tensorboard_data/sum107/' + str(datetime.now()) + '/'
tensor_model_sum_path = '../tensor_model_sum/'
#classifier_model = "../logs/" + "model_GD360_h184_w184_c6all_10_0.71-191_reprod_dropoutList_part2.meta"
if GENERATE_FILELIST == 1:
# image_distortions.rotateflipImg()
if RETRAIN or CONTINUE_TRAIN:
tools.prepare_list(LABEL_LIST, LABEL_PATH) # hy: avoid wrong list error #hy trial
tools.read_images(LABEL_LIST) # hy: get usable input size for w,h
tools.prepare_list(LABEL_LIST_TEST, LABEL_PATH_TEST) # hy: avoid wrong list error #hy trial
tools.read_images(LABEL_LIST_TEST) # hy: get usable input size for w,h
else:
print 'Creating test file list', LABEL_LIST_TEST, 'path', LABEL_PATH_TEST
tools.prepare_list(LABEL_LIST_TEST, LABEL_PATH_TEST) # hy: avoid wrong list error #hy trial
if do_active_fields_test == 1:
tools.read_images(LABEL_LIST_TEST, random_read=False) # hy: get usable input size for w,h
else:
tools.read_images(LABEL_LIST_TEST, random_read=True) # hy: get usable input size for w,h
else:
if TEST_with_Images or TEST_with_Video:
tools.read_images(LABEL_LIST_TEST)
# tools.read_image_output_slices(LABEL_LIST_TEST) #hy: get slices for activation analysis
else:
tools.read_images(LABEL_LIST)
training_iters = 300000#151 # 30000 # 1500 12500,
if CONTINUE_TRAIN:
training_iters = current_step + 90000 #90000010
display_step = 1 # a factor, will be multiplied by 10
print 'classes:', settings.LABELS
def track_roi(VIDEO_FILE):
video = cv2.VideoCapture(VIDEO_FILE) # hy: changed from cv2.VideoCapture()
# cv2.waitKey(10)
video.set(1, 2) # hy: changed from 1,2000 which was for wheelchair test video,
# hy: propID=1 means 0-based index of the frame to be decoded/captured next
if not video.isOpened():
print "cannot find or open video file"
exit(-1)
# Read the first frame of the video
ret, frame = video.read()
# Set the ROI (Region of Interest). Actually, this is a
# rectangle of the building that we're tracking
c, r, w, h = 900, 650, 400, 400
track_window = (c, r, w, h)
# Create mask and normalized histogram
roi = frame[r:r + h, c:c + w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_cond = (
cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1) # hy: TERM_CRITERIA_EPS - terminate iteration condition
while True:
ret, frame = video.read()
if ret:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
ret, track_window = cv2.meanShift(dst, track_window, term_cond)
x, y, w, h = track_window
# hy: draw rectangle as tracked window area
cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
cv2.putText(frame, 'Tracked', (x - 25, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 255), 2, cv2.CV_AA)
cv2.imshow('Tracking', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
print 'no frame received'
break
return [track_window]
def EVALUATE_IMAGES_sort_activation(sess):
# Testing
carimages, cartargets, f = tools.read_images(LABEL_LIST_TEST)
# carimages, cartargets, f = tools.read_image_output_slices(LABEL_LIST_TEST)
TEST_length = len(carimages)
# TEST_length = 1
# carimages = carimages / 255 - 0.5 #TODO here is tricky, double check wit respect to the formats
# digits.images = carimages.reshape((len(carimages), -1))
"""
print '\n'
print "4.print shape of database: ", digits.images.shape # hy
digits.images = np.expand_dims(np.array(digits.images), 2).astype(np.float32)
print "4.1.print shape of database after expansion: ", digits.images.shape # hy
digits.target = np.array(cartargets).astype(np.int32)
digits.target = dense_to_one_hot(digits.target)
print '\n'
print "5.print target"
print digits.target
"""
confMat1_TEST = np.zeros((n_classes, n_classes), dtype=np.float) # hy collect detailed confusion matrix
confMat2_TEST = np.zeros((2, 2), dtype=np.float)
confMat3 = np.zeros((1, n_classes), dtype=np.float)
count_labels = np.zeros((1, n_classes), dtype=np.float)
class_probability = np.zeros((1, n_classes), dtype=np.float)
pred_collect = []
slices = []
    d = {}  # maps image filename -> confidence of the predicted class
for i in range(0, TEST_length, 1):
# hy:extra Debug
# print "Shape of cropped frame in grayscale:", frame_crop_resize_gray.shape
im = carimages[i]
# im = frame_crop_resize_gray # Lazy
# from scipy import ndimage from scipy import misc
# im = ndimage.gaussian_filter(im, sigma=3)
# or
# im = ndimage.uniform_filter(im, size=11) #local mean
######################################
######################################
im = np.asarray(im, np.float32)
CONF = 0.20
test_image = im
test_labels = np.zeros((1, n_classes)) # Making a dummy label tp avoid errors as initial predict
# hy: info
# print "Image size (wxh):", im.size #hy
# Doing something very stupid here, fix it!
test_image = im.reshape((-1, im.size))
# print test_image
# print sess.run(test_image)
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
test_image = test_image / 255 - 0.5 # TODO here is tricky, double check wit respect to the formats
batch_xs, batch_ys = test_image, test_labels
# print 'batch_xs, batch_ys:', batch_xs, ', ', batch_ys
output = sess.run(pred, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
pred_collect.append(output)
# print("Output for external=",output)
# print output
output = tools.convert_to_confidence(output) #
np.set_printoptions(precision=3)
RES = np.argmax(output) # hy predicted label
label_target = int(cartargets[i]) # hy ground truth label
# label_pred_str, label_pred_num = tools.convert_result(RES)
# label_target_str, label_target_num = tools.convert_result(label_target)
predict = int(RES)
print '\nTestImage', i + 1, ':', f[i]
# print 'Image name', carimages
print 'Ground truth label:', LABELS[label_target][:-1], ', predict:', LABELS[RES][:-1], ', pres:', output[0][
RES] # hy
# print 'output all:', output[0] # hy
label = label_target
d[f[i]] = output[0][RES]
confMat1_TEST[label, predict] = confMat1_TEST[label, predict] + 1
count_labels[:, label] = count_labels[:, label] + 1
if predict == label_target:
label2_TEST = 0
pred2_TEST = 0
confMat3[:, int(RES)] = confMat3[:, int(RES)] + 1
tools.SAVE_CorrectClassified_Img(f[i], SAVE_CorrectClassified)
else:
label2_TEST = 1
pred2_TEST = 1
tools.SAVE_Misclassified_Img(f[i], SAVE_Misclassified)
confMat2_TEST[label2_TEST, pred2_TEST] = confMat2_TEST[label2_TEST, pred2_TEST] + 1
tp = confMat2_TEST[0, 0]
tn = confMat2_TEST[1, 1]
print '\nRank list of predicted results'
tools.rank_index(output[0], label_target)
print '\nCount correctly classified'
tools.print_label_title()
print confMat3
print 'Total labels'
print count_labels
print 'Proportion of correctly classified'
for pos in range(0, n_classes, 1):
if count_labels[:, pos] > 0:
class_probability[:, pos] = confMat3[:, pos] / count_labels[:, pos]
else:
class_probability[:, pos] = 0
print class_probability
# print '\ntp, tn, total number of test images:', tp, ', ', tn, ', ', TEST_length
# print confMat2_TEST
print '\nTEST general count:'
print confMat2_TEST
print 'TEST overall acc:', "{:.3f}".format(tp / TEST_length)
# print 'pred_collect', pred_collect
###################################################################################
## Feature output #################################################################
###################################################################################
if TEST_CONV_OUTPUT:
print '\nTEST feature output:'
# conv_feature = sess.run(conv1, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1})
conv_feature = sess.run(conv2, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1})
tools.get_feature_map(conv_feature, f, 'conv2')
else:
print 'no image got'
# print 'activation list', d
sorted_d = sorted(d.items(), key=operator.itemgetter(1), reverse=True)
print 'sorted', sorted_d
return confMat1_TEST
def EVALUATE_IMAGES(session, num_class, img_list, _labels): # (eva)
sess = session
LABEL_LIST_TEST = img_list
LABELS = _labels
n_classes = num_class
################### active field test part one ################################
if do_active_fields_test == 1:
carimages, cartargets, f = tools.read_images(LABEL_LIST_TEST, random_read=False)
TEST_length = len(carimages)
print '1 file', LABEL_LIST_TEST, 'path', LABEL_PATH_TEST, 'len', TEST_length
# TEST_length = 1
print 'get active fields'
row = 0
col = 0
test_img_bg = cv2.imread(activation_test_img_name)
test_img_bg = cv2.resize(test_img_bg, (400, 400))
overlay = np.zeros([400, 400, 3], dtype=np.uint8)
test_img_transparent = overlay.copy()
cv2.rectangle(overlay, (0, 0), (400, 400), color=(60, 80, 30, 3))
alpha = 0.7 # hy: parameter for degree of transparency
print 'test_img_bg', test_img_bg
cv2.addWeighted(overlay, alpha, test_img_bg, 1 - alpha, 0, test_img_transparent)
print 'test_img_transparent', test_img_transparent
bg = Image.fromarray(test_img_transparent)
print 'bg done'
else:
carimages, cartargets, f = tools.read_images(LABEL_LIST_TEST, random_read=False)
TEST_length = len(carimages)
print '1 file', LABEL_LIST_TEST, 'path', LABEL_PATH_TEST, 'len', TEST_length
if DEBUG == 1 and do_active_fields_test == 1:
overlay_show = Image.fromarray(overlay)
overlay_show.save('../1-overlay.jpg')
bg.save('../1-before.jpg')
################### active field test part one end ############################
# carimages = carimages / 255 - 0.5 #TODO here is tricky, double check wit respect to the formats
# digits.images = carimages.reshape((len(carimages), -1))
"""
print '\n'
print "4.print shape of database: ", digits.images.shape # hy
digits.images = np.expand_dims(np.array(digits.images), 2).astype(np.float32)
print "4.1.print shape of database after expansion: ", digits.images.shape # hy
digits.target = np.array(cartargets).astype(np.int32)
digits.target = dense_to_one_hot(digits.target)
print '\n'
print "5.print target"
print digits.target
"""
confMat1_TEST = np.zeros((n_classes, n_classes), dtype=np.float) # hy collect detailed confusion matrix
confMat2_TEST = np.zeros((2, 2), dtype=np.float)
confMat3 = np.zeros((1, n_classes), dtype=np.float)
count_labels = np.zeros((1, n_classes), dtype=np.float)
class_probability = np.zeros((1, n_classes), dtype=np.float)
pred_collect = []
if result_for_table == 0:
print 'True/False', 'No.', 'Name', 'TargetLabel', 'PredictLabel', 'Precision', 'whole_list', 'Top1', 'Top1_pres', \
'Top2', 'Top2_pres', 'Top3', 'Top3_pres', 'Top4', 'Top4_pres', 'Top5', 'Top5_pres', 'last', 'last_pres'
for i in range(0, TEST_length, 1):
# hy:extra Debug
# print "Shape of cropped frame in grayscale:", frame_crop_resize_gray.shape
im = carimages[i]
# im = frame_crop_resize_gray # Lazy
im = np.asarray(im, np.float32)
CONF = 0.20
test_image = im
test_lables = np.zeros((1, n_classes)) # Making a dummy label tp avoid errors as initial predict
# hy: info
# print "Image size (wxh):", im.size #hy
# Doing something very stupid here, fix it!
test_image = im.reshape((-1, im.size))
# print test_image
# print sess.run(test_image)
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
test_image = test_image / 255 - 0.5 # TODO here is tricky, double check wit respect to the formats
batch_xs, batch_ys = test_image, test_lables
# print 'batch_xs, batch_ys:', batch_xs, ', ', batch_ys
# output = sess.run("Accuracy:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1.})
output = sess.run("pred:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1.})
# print("Output for external=",output)
output = tools.convert_to_confidence(output) #
np.set_printoptions(precision=3)
RES = np.argmax(output) # hy predicted label
label_target = int(cartargets[i]) # hy ground truth label
# label_pred_str, label_pred_num = tools.convert_result(RES)
# label_target_str, label_target_num = tools.convert_result(label_target)
sorted_vec, prob_all = tools.rank_index(output[0], label_target)
pred_collect.append(prob_all[0])
################### active field test part two start ################################
if do_active_fields_test == 1:
print 'set up for active fields'
if col >= 4:
# print '\ncol is 4'
col = 0
row += 1
if row >= 4:
# print '\nrow is 4'
row = 0
positions = ((col) * 100, (row) * 100, (col + 1) * 100, (row + 1) * 100) # x0,y0, x1,y1
col += 1
# define image for obtaining its active fields
# activation_test_img = Image.open('../hintenTest.jpg')
# activation_test_img = Image.open('../vornTest.jpg')
# activation_test_img = Image.open('../tmp/resized/links/links_t2_1_rz400_d0_0400_1.jpg')
# activation_test_img = Image.open('../tmp/resized/links/links_t2_1_rz400_u870_400400.jpg')
# activation_test_img = Image.open('../Test_Images/hinten_ori1_rz400.jpg')
# activation_test_img = Image.open('../tmp/resized/oben/oben_t2_1_rz400_u856_400400.jpg')
# activation_test_img = Image.open('../tmp/resized/unten/unten_t2_1_rz400_d0_0400_1.jpg')
# activation_test_img = Image.open('../tmp/resized/unten/unten_t2_1_rz400_u923_400400.jpg')
# activation_test_img = Image.open('../tmp/resized/rechts/rechts_t2_1_rz400_d0_0400_1.jpg')
# activation_test_img = Image.open('../tmp/resized/rechts/rechts_t2_1_rz400_u825_400400.jpg')
# activation_test_img_copy = cv2.clone(activation_test_img)
activation_test_img = Image.open(activation_test_img_name)
thresh = float(max(pred_collect) * 0.97)
print 'thresh', thresh
if prob_all[0] > thresh:
# print '\nactive field', positions
image_crop_part = activation_test_img.crop(positions)
image_crop_part = image_crop_part.filter(ImageFilter.GaussianBlur(radius=1))
bg.paste(image_crop_part, positions)
bg.save('../active_fields.jpg')
################### active field test end ################################
if result_for_table == 1:
if LABELS[label_target][:-1] == LABELS[RES][:-1]:
print '\nTestImage', i + 1, f[i], LABELS[label_target][:-1] \
, LABELS[RES][:-1], prob_all[0],
for img_i in xrange(n_classes):
print settings.LABEL_names[sorted_vec[n_classes - 1 - img_i]], prob_all[img_i],
else:
print '\nMis-C-TestImage', i + 1, f[i], LABELS[label_target][:-1], \
LABELS[RES][:-1], prob_all[0],
for img_i in xrange(n_classes):
print settings.LABEL_names[sorted_vec[n_classes - 1 - img_i]], prob_all[img_i],
if result_for_table == 0:
print '\nTestImage', i + 1, ':', f[i]
# print 'Image name', carimages
print 'Ground truth label:', LABELS[label_target][:-1], '; predict:', LABELS[RES][:-1] # hy
# print 'Target:', label_target, '; predict:', RES # hy
print '\nRank list of predicted results'
tools.rank_index(output[0], label_target)
label = label_target
predict = int(RES)
confMat1_TEST[label, predict] = confMat1_TEST[label, predict] + 1
count_labels[:, label] = count_labels[:, label] + 1
if predict == label_target:
label2_TEST = 0
pred2_TEST = 0
confMat3[:, int(RES)] = confMat3[:, int(RES)] + 1
tools.SAVE_CorrectClassified_Img(f[i], SAVE_CorrectClassified)
else:
label2_TEST = 1
pred2_TEST = 1
tools.SAVE_Misclassified_Img(f[i], SAVE_Misclassified)
confMat2_TEST[label2_TEST, pred2_TEST] = confMat2_TEST[label2_TEST, pred2_TEST] + 1
tp = confMat2_TEST[0, 0]
tn = confMat2_TEST[1, 1]
# print summary
print '\n\nCount correctly classified'
tools.print_label_title()
print confMat3
print 'Total labels'
print count_labels
print '\nProportion of correctly classified'
for pos in range(0, n_classes, 1):
if count_labels[:, pos] > 0:
class_probability[:, pos] = confMat3[:, pos] / count_labels[:, pos]
print class_probability
# print '\ntp, tn, total number of test images:', tp, ', ', tn, ', ', TEST_length
# print confMat2_TEST
print '\nTEST general count:'
print confMat2_TEST
print 'TEST overall acc:', "{:.3f}".format(tp / TEST_length)
###################################################################################
## Feature output #################################################################
###################################################################################
if TEST_CONV_OUTPUT:
print '\nTEST feature output:'
test_writer = tf.train.SummaryWriter(tensorboard_path + settings.LABELS[label_target], sess.graph)
wc1 = sess.run("wc1:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
wc2 = sess.run("wc2:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
wd1 = sess.run("wd1:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
w_out = sess.run("w_out:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
bc1 = sess.run("bc1:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
bc2 = sess.run("bc2:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
bd1 = sess.run("bd1:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
b_out = sess.run("b_out:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
conv_feature = sess.run("conv2:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
# conv_feature_2D_batch = tools.get_feature_map(conv_feature,f,'conv2') #get defined conv value, not sure for conv2
# featureImg = sess.run("conv2img:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
summary_op = tf.merge_all_summaries()
test_res = sess.run(summary_op, feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
test_writer.add_summary(test_res, 1)
# print '2D size',len(conv_feature_2D_batch),'\n',sum(conv_feature_2D_batch[:])
print 'wc1 shape', wc1.shape, 'wc2:', wc2.shape, 'wd1:', wd1.shape, 'w_out:', w_out.shape
print 'bc1 shape ', bc1.shape, 'bc2:', ' ', bc2.shape, 'bd1: ', bd1.shape, 'b_out: ', b_out.shape
print 'pred shape', len(pred_collect)
else:
print 'no image got'
return (confMat1_TEST, count_labels, confMat3, class_probability)
def EVALUATE_IMAGES_VAGUE():
# Testing
cartargets, f = tools.read_test_images(LABEL_LIST_TEST)
# print 'cartargets label', cartargets
TEST_length = 20
# TEST_length = len(cartargets)
# carimages = carimages / 255 - 0.5 #TODO here is tricky, double check wit respect to the formats
# digits.images = carimages.reshape((len(carimages), -1))
"""
print '\n'
print "4.print shape of database: ", digits.images.shape # hy
digits.images = np.expand_dims(np.array(digits.images), 2).astype(np.float32)
print "4.1.print shape of database after expansion: ", digits.images.shape # hy
digits.target = np.array(cartargets).astype(np.int32)
digits.target = dense_to_one_hot(digits.target)
print '\n'
print "5.print target"
print digits.target
"""
confMat_m1_TEST = np.zeros((n_classes, n_classes), dtype=np.float)
confMat_m2_TEST = np.zeros((2, 2), dtype=np.float)
confMat_m3 = np.zeros((1, n_classes), dtype=np.float)
count_labels_m = np.zeros((1, n_classes), dtype=np.float)
class_probability_m = np.zeros((1, n_classes), dtype=np.float)
patch_size = 227
for i in range(0, TEST_length, 1):
# hy:extra Debug
# im = carimages[i]
# im = frame_crop_resize_gray # Lazy
'''
#hy: option to use numpy.ndarray, but it cannot use attribute 'crop' of Image (integer) object
img = cv2.imread(f[i])
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = imutils.resize(img, width=patch_size, height=patch_size)
h_b, w_b = img.shape
print 'h_b', h_b, ', w_b', w_b
'''
print 'processing main test image', f[i]
# hy: use integer image: Image, resize
img = Image.open(f[i]).convert('LA') # convert to gray
h_b, w_b = img.size
# print 'read test image ok', h_b, ', ', w_b
img = img.resize((patch_size * 2, patch_size * 2), Image.BICUBIC) # hy:use bicubic
# h_b, w_b = img.size
# print 'h_b', h_b, ', w_b', w_b
test_lables = np.zeros((1, n_classes)) # Making a dummy label tp avoid errors as initial predict
test_image = img
test_image_label = cartargets[i]
# Doing something very stupid here, fix it!
# test_image = im.reshape((-1, im.size))
# test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
# test_image = test_image / 255 - 0.5 # TODO here is tricky, double check with respect to the formats
slices_rec = prep_image.create_test_slices(test_image, patch_size, test_image_label)
print 'slices with path received', slices_rec
slices_len = len(slices_rec)
out_sum = np.zeros((1, n_classes), dtype=np.float)
out_box = np.zeros((1, n_classes), dtype=np.float)
# batch_xs, batch_ys = im, cartargets
# output_im = sess.run(pred, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
for j in range(0, slices_len, 1):
print '\nprocessing slice', j, slices_rec[j]
# hy read and resize integer object
# im_s = Image.open(slices_rec[j]) #numpy.ndarray does not have attribute 'crop'
# im_s = im_s.resize((settings.h_resize, settings.w_resize), Image.BICUBIC) # hy:use bicubic, resize func reuqires integer object
# im_s = im_s.convert('LA') #hy convert to gray
# hy read and resize continuous number object
im_s = cv2.imread(slices_rec[j]) # result is not integer
im_s = cv2.cvtColor(im_s, cv2.COLOR_BGR2GRAY)
im_s = imutils.resize(im_s, width=settings.h_resize, height=settings.w_resize)
# hy: convert to integer object as required for the tensor
im_s = np.asarray(im_s, np.float32)
CONF = 0.20
(sorted_vec, outputsub) = EVALUATE_IMAGE_SLICES(im_s, f, i, sess, cartargets)
print 'slice', j, 'result', sorted_vec
print 'Image slice', slices_rec[j]
outbox = outputsub
out_sum = out_sum + outputsub[0]
# print '\ntp, tn, total number of test images:', tp, ', ', tn, ', ', TEST_length
# print confMat2_TEST
print '\nTEST general count:'
print out_sum
print out_sum / slices_len
outbox[0] = out_sum / slices_len
output_im = tools.rank_index(outbox[0], test_image_label)
print 'target', test_image_label
print 'output final prediction', output_im[-1]
RES = int(output_im[-1])
print 'test_image_label', test_image_label
label = test_image_label
predict = int(RES)
confMat_m1_TEST[label, predict] = confMat_m1_TEST[label, predict] + 1
count_labels_m[:, test_image_label] = count_labels_m[:, test_image_label] + 1
if int(RES) == int(test_image_label):
label2_TEST = 0
pred2_TEST = 0
confMat_m3[:, int(RES)] = confMat_m3[:, int(RES)] + 1
tools.SAVE_CorrectClassified_Img(f[i], SAVE_CorrectClassified)
else:
label2_TEST = 1
pred2_TEST = 1
tools.SAVE_Misclassified_Img(f[i], SAVE_Misclassified)
# print 'Count classified'
# tools.print_label_title()
# print confMat1_TEST
confMat_m2_TEST[label2_TEST, pred2_TEST] = confMat_m2_TEST[label2_TEST, pred2_TEST] + 1
tp = confMat_m2_TEST[0, 0]
tn = confMat_m2_TEST[1, 1]
print 'Count classified m1 - confusion matrix'
tools.print_label_title()
print confMat_m1_TEST
print '\nCount correctly classified -m3'
tools.print_label_title()
print confMat_m3
print 'tp,np -m2'
print confMat_m2_TEST
print 'Total labels'
print count_labels_m
print 'Proportion of correctly classified for detailed analysis' # ok
for pos in range(0, n_classes, 1):
    if count_labels_m[:, pos] > 0:
        class_probability_m[:, pos] = confMat_m3[:, pos] / count_labels_m[:, pos]
print class_probability_m
print 'TEST overall acc:', "{:.3f}".format(tp / TEST_length)
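# Minimal sketch of the slice-averaging idea implemented in EVALUATE_IMAGES_VAGUE above (illustrative
# only, not called by the script): each slice yields a 1 x n_classes confidence vector, the vectors
# are averaged, and the argmax of the mean is the final prediction for the whole image.
def _average_slice_predictions(slice_outputs):
    import numpy as np
    mean_conf = np.mean(np.vstack(slice_outputs), axis=0)  # average over all slices
    return int(np.argmax(mean_conf)), mean_conf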
def EVALUATE_IMAGE_SLICES(img, f, index, sess, cartargets): # hy todo change dimension to fit tensorflow
confMat1_TEST = np.zeros((n_classes, n_classes), dtype=np.float)
confMat2_TEST = np.zeros((2, 2), dtype=np.float)
confMat3 = np.zeros((1, n_classes), dtype=np.float)
count_labels = np.zeros((1, n_classes), dtype=np.float)
class_probability = np.zeros((1, n_classes), dtype=np.float)
img_s = img
i = index
test_lables = np.zeros((1, n_classes)) # Making a dummy label to avoid errors in the initial prediction
# Doing something very stupid here, fix it!
test_image = img_s.reshape((-1, img_s.size))
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
test_image = test_image / 255 - 0.5 # TODO here is tricky, double check with respect to the formats
batch_xs1, batch_ys1 = test_image, test_lables
output = sess.run(pred, feed_dict={x: batch_xs1, y: batch_ys1, keep_prob: 1.})
# print("Output for external=",output)
# print output
output = tools.convert_to_confidence(output) #
np.set_printoptions(precision=3)
RES = np.argmax(output)
label_target = int(cartargets[i])
print '\nTestImage', i + 1, ':', f[i]
# print 'Image name', carimages
print 'Target:', LABELS[label_target][:-1], '; predict:', LABELS[RES][:-1] # hy
# print 'Target:', label_target, '; predict:', RES # hy
count_labels[:, label_target] = count_labels[:, label_target] + 1
label = label_target
predict = int(RES)
# hy: INFO - print label, predict
# print 'labels_onehot:', labels_onehot[i, :], ' label=', label
# print 'score:', scores[i, :]
# print 'predict:', predict
# if label == predict:
confMat1_TEST[label, predict] = confMat1_TEST[label, predict] + 1
if int(RES) == label_target:
label2_TEST = 0
pred2_TEST = 0
confMat3[:, int(RES)] = confMat3[:, int(RES)] + 1
tools.SAVE_CorrectClassified_Img(f[i], SAVE_CorrectClassified)
else:
label2_TEST = 1
pred2_TEST = 1
tools.SAVE_Misclassified_Img(f[i], SAVE_Misclassified)
# print 'Count classified'
# tools.print_label_title()
# print confMat1_TEST
confMat2_TEST[label2_TEST, pred2_TEST] = confMat2_TEST[label2_TEST, pred2_TEST] + 1
tp = confMat2_TEST[0, 0]
tn = confMat2_TEST[1, 1]
print '\nCount correctly classified'
tools.print_label_title()
print confMat3
# print 'Total labels'
# print count_labels
# print 'Proportion of correctly classified'
# if count_labels[:,pos] > 0:
# for pos in range(0, 6, 1):
# class_probability[:, pos] = confMat3[:, pos] / count_labels[:, pos]
# print class_probability
# print '\nRank list of predicted results'
sorted_vec = tools.rank_index(output[0], label_target)
# return (confMat1_TEST, confMat2_TEST, confMat3, count_labels, class_probability,sorted_vec,output)
return (sorted_vec, output)
def EVALUATE_WITH_WEBCAM(camera_port, stop):
# hy: check camera availability
camera = cv2.VideoCapture(camera_port)
if not stop:
# if ckpt and ckpt.model_checkpoint_path:
# Camera 0 is the integrated web cam on my netbook
# Number of frames to throw away while the camera adjusts to light levels
ramp_frames = 1
i = 0
while True: # hy: confirm camera is available
# Now we can initialize the camera capture object with the cv2.VideoCapture class.
# All it needs is the index to a camera port.
print 'Getting image...'
ret, frame = camera.read()
# Captures a single image from the camera and returns it in PIL format
# ret = camera.set(3, 320) #hy use properties 3 and 4 to set frame resolution. 3- w, 4- h
# ret = camera.set(4, 240)
cv2.waitKey(1)
# A nice feature of the imwrite method is that it will automatically choose the
# correct format based on the file extension you provide.
# cv2.imwrite(file, camera_capture)
#################################### /////////////////////////////
if frame is not None:
# print 'frame from webcam obtained'
# hy: before continue check if image is read correctly
# while frame is not None:
i += 1
# hy:
h_frame = frame.shape[0]
w_frame = frame.shape[1] # hy: h 1536 x w 2304
# hy: info
print "h_video and w_video", h_frame, ",", w_frame
# cv2.imshow("ori", frame)
# crop_x1 = int((w_frame - area_step_size_webcam) / 2)
# crop_y1 = int((h_frame - area_step_size_webcam) / 2) # 1#200
# crop_x2 = crop_x1 + area_step_size_webcam
# crop_y2 = int(crop_y1 + area_step_size_webcam * settings.h_resize / settings.w_resize)
crop_y1 = int((h_frame - area_step_size_webcam) / 2) # 1#200
crop_x1 = int((w_frame - area_step_size_webcam) / 2)
crop_y2 = crop_y1 + area_step_size_webcam # hy:define shorter side as unit length to avoid decimal
crop_x2 = crop_x1 + area_step_size_webcam * settings.w_resize / settings.h_resize
# print "x1,y1,x2,y2", crop_x1, 'x', crop_y1, ',', crop_x2, 'x', crop_y2
# Crop
# hy: select suitable values for the area of cropped frame,
# adapt to the ratio of h to w after resized, e.g. 42x42 ie.w=h
frame_crop = frame[crop_y1:crop_y2, crop_x1:crop_x2]
# hy: info
# print "shape:y1,y2,x1,x2:", crop_y1," ", crop_y2," ", crop_x1," ", crop_x2
# print "Shape of cropped frame:", frame_crop.shape #hy: it should be same as the shape of trained images(the input image)
cv2.imshow("frame_cropped", frame_crop)
# Resize
# frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY), width=42)
# hy: it is something different to video_crop_tool.py, here for tensorflow the width has to be that of input image
frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY),
width=settings.w_resize)
# hy:extra Debug
# print "Shape of cropped frame in grayscale:", frame_crop_resize_gray.shape
im = frame_crop_resize_gray # Lazy
im = np.asarray(im, np.float32)
cv2.imshow("TensorFlow Window", imutils.resize(im.astype(np.uint8), 227)) # hy trial
# Adding noise to the street image #TODO
# im=add_noise(im,5)
# Bluring the image to help detection #TODO
# im = cv2.GaussianBlur(im,(5,5),0)
CONF = 0.20
test_image = im
test_lables = np.zeros((1, n_classes)) # Making a dummy label to avoid errors in the initial prediction
# hy: info
# print "Image size (wxh):", im.size #hy
# Doing something very stupid here, fix it!
test_image = im.reshape((-1, im.size))
# print test_image
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
# print test_image
test_image = test_image / 255 - 0.5 # TODO this is tricky, double check with respect to the formats
batch_xs, batch_ys = test_image, test_lables
# print 'batch_xs, batch_ys:', batch_xs, ', ', batch_ys
output = sess.run(pred, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
# print("Output for external=",output)
# print output
output = tools.convert_to_confidence(output)
np.set_printoptions(precision=2)
print '\nFrame', i
tools.print_label_title_conf()
print 'confidence =', output # hy
RES = np.argmax(output)
label_pred_str = LABELS[RES][:-1]
# label_pred_str, label_pred_num = tools.convert_result(RES)
# print 'label_pred_str', label_pred_str
print 'predicted label:', LABELS[RES][:-1]
if label_pred_str == video_label:
label2_TEST_Video = 0
pred2_TEST_Video = 0
name_str = settings.CorrectClassified + "/frame_crop%d.jpg" % i
tools.SAVE_CorrectClassified_frame(name_str, frame_crop, SAVE_CorrectClassified)
else:
label2_TEST_Video = 1
pred2_TEST_Video = 1
name_str = settings.Misclassified + "/frame_crop%d.jpg" % i
tools.SAVE_Misclassified_frame(name_str, frame_crop, SAVE_Misclassified)
cv2.rectangle(frame, (crop_x1, crop_y1), (crop_x2, crop_y2), color=(0, 255, 0), thickness=1)
# cv2.putText(frame, "predicted1: " + label_pred_str, org=(w_frame / 10, h_frame / 20),
# fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=1)
cv2.putText(frame, "predicted1: " + label_pred_str, org=(w_frame / 10, h_frame / 20),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=1)
prob_str = str(output[0][RES])[:4]
cv2.putText(frame, "prob:" + prob_str, org=(w_frame / 10, h_frame / 8),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=1)
# hy: could be modified to display desired label
# cv2.putText(frame, LABELS[RES], org=(800, 100), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255,0,0), thickness=3 )
# cv2.putText(frame, str(video.get(1)), org=(20, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
# color=(0, 255, 0), thickness=1)
frame_demo = imutils.resize(frame, width=1200)
# frame_demo = imutils.resize(frame, width = min(1200, settings.w_resize * 30)) #hy: choose a smaller window size
cv2.imshow("Demo", frame_demo)
cv2.waitKey(300)
# TODO add termination condition
print 'no frame retrieved'
del (camera)
return stop
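# Condensed sketch of the centered-ROI arithmetic used in the webcam loop above (illustrative helper,
# not called by the script). The shorter side defines the unit length and the crop keeps the aspect
# ratio of the training input (w_resize / h_resize); all inputs are assumed to be integers.
def _centered_crop_window(h_frame, w_frame, step_size, h_resize, w_resize):
    crop_y1 = int((h_frame - step_size) / 2)
    crop_x1 = int((w_frame - step_size) / 2)
    crop_y2 = crop_y1 + step_size
    crop_x2 = crop_x1 + int(step_size * w_resize / float(h_resize))
    return crop_y1, crop_y2, crop_x1, crop_x2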
def EVALUATE_WITH_WEBCAM_track_roi(camera_port):
frame_index_i = 0
crop_x1 = 300
area_step_size = 200
crop_y1 = 200
# hy: check camera availability
camera = cv2.VideoCapture(camera_port)
# Read the first frame of the video
ret, frame = camera.read()
# Set the ROI (Region of Interest). Actually, this is a
# rectangle of the building that we're tracking
###############################################################################################
# Track
###############################################################################################
c, r, w, h = 100, 200, 200, 200
track_window = (c, r, w, h)
# track_window = (x0, y0, w, h)
# Create mask and normalized histogram
roi = frame[r:r + h, c:c + w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_cond = (
cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1) # hy: TERM_CRITERIA_EPS - terminate iteration condition
# hy: initialization of confmatrix
confMat2_TEST_Video = np.zeros((2, 2), dtype=np.float)
while True: # hy: confirm camera is available
# Now we can initialize the camera capture object with the cv2.VideoCapture class.
# All it needs is the index to a camera port.
print 'Getting image...'
# Captures a single image from the camera and returns it in PIL format
ret, frame = camera.read()
# ret = camera.set(3, 320) #hy use properties 3 and 4 to set frame resolution. 3- w, 4- h
# ret = camera.set(4, 240)
cv2.waitKey(1)
# A nice feature of the imwrite method is that it will automatically choose the
# correct format based on the file extension you provide.
# cv2.imwrite(file, camera_capture)
if ret:
frame_index_i = frame_index_i + 1
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
print 'hsv done'
dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
print 'back project done'
ret, track_window = cv2.meanShift(dst, track_window, term_cond)
print 'ret'
xt, yt, wt, ht = track_window
# hy info
print 'xt,yt,wt,ht:', xt, ',', yt, ',', wt, ',', ht
# hy: draw rectangle as tracked window area
cv2.rectangle(frame, (xt, yt), (xt + wt, yt + ht), 255, 2)
cv2.putText(frame, 'tracked', (xt - 25, yt - 10), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 255), 2, cv2.CV_AA)
cv2.waitKey(100)
###############################################################################################
# evaluate
###############################################################################################
# hy: info
# print "shape in evaluate:x1,y1:", crop_x1, ',', crop_y1
crop_x1 = xt
crop_x2 = xt + wt
crop_y1 = yt
area_step_size = ht
crop_y2 = crop_y1 + area_step_size * settings.h_resize / settings.w_resize
# hy: select suitable values for the area of cropped frame,
# adapt to the ratio of h to w after resized, e.g. 42x42 ie.w=h
frame_crop = frame[crop_y1:crop_y2, crop_x1:crop_x2]
# hy: info
print "shape after set_video_window:y1, y2, x1, x2:", crop_y1, ',', crop_y2, ',', crop_x1, ',', crop_x2
# hy:
h_frame = frame.shape[0]
w_frame = frame.shape[1] # hy: h 1536 x w 2304
#######################################################################################
# Evaluate
#######################################################################################
# hy: info
# print "shape:y1,y2,x1,x2:", crop_y1," ", crop_y2," ", crop_x1," ", crop_x2
# print "Shape of cropped frame:", frame_crop.shape #hy: it should be same as the shape of trained images(the input image)
cv2.imshow("frame_cropped", frame_crop)
# Resize
# frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY), width=42)
# hy: it is something different to video_crop_tool.py, here for tensorflow the width has to be that of input image
frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY),
width=settings.w_resize)
# hy:extra Debug
# print "Shape of cropped frame in grayscale:", frame_crop_resize_gray.shape
im = frame_crop_resize_gray # Lazy
im = np.asarray(im, np.float32)
cv2.imshow("TensorFlow Window", imutils.resize(im.astype(np.uint8), 200))
# Adding noise to the street image #TODO
# im=add_noise(im,5)
# Bluring the image to help detection #TODO
# im = cv2.GaussianBlur(im,(5,5),0)
CONF = 0.20
test_image = im
test_lables = np.zeros((1, n_classes)) # Making a dummy label to avoid errors in the initial prediction
# hy: info
# print "Image size (wxh):", im.size #hy
# Doing something very stupid here, fix it!
test_image = im.reshape((-1, im.size))
# print test_image
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
# print test_image
test_image = test_image / 255 - 0.5 # TODO this is tricky, double check with respect to the formats
batch_xs, batch_ys = test_image, test_lables
# print 'batch_xs, batch_ys:', batch_xs, ', ', batch_ys
output = sess.run(pred, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
# print("Output for external=",output)
# print output
output = tools.convert_to_confidence(output)
np.set_printoptions(precision=2)
print '\nFrame', frame_index_i
tools.print_label_title_conf()
print 'confidence =', output # hy
RES = np.argmax(output)
if int(RES) == video_label:
label2_TEST_Video = 0
pred2_TEST_Video = 0
name_str = settings.CorrectClassified + "/frame_crop%d.jpg" % frame_index_i
tools.SAVE_CorrectClassified_frame(name_str, frame_crop, SAVE_CorrectClassified)
else:
label2_TEST_Video = 1
pred2_TEST_Video = 1
name_str = settings.Misclassified + "/frame_crop%d.jpg" % frame_index_i
tools.SAVE_Misclassified_frame(name_str, frame_crop, SAVE_Misclassified)
cv2.rectangle(frame, (crop_x1, crop_y1), (crop_x2, crop_y2), color=(0, 255, 0), thickness=1)
cv2.putText(frame, "predicted1: " + LABELS[RES], org=(w_frame / 10, h_frame / 20),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=1)
# cv2.putText(frame, "predicted2: " + LABELS[RES], org=(w_frame / 10, h_frame / 20),
# fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=5, color=(0, 255, 0), thickness=5)
output_display = str(output[0][RES])[:4]
cv2.putText(frame, "prob:" + output_display, org=(w_frame / 10, h_frame / 8),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=1)
# cv2.putText(frame, "predicted1: " + LABELS[RES] + ", prob:" + output[RES], org=(w_frame / 6, h_frame / 10),
# fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=3)
# hy: could be modified to display desired label
# cv2.putText(frame, LABELS[RES], org=(800, 100), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255,0,0), thickness=3 )
# cv2.putText(frame, str(video.get(1)), org=(20, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
# color=(0, 255, 0), thickness=1)
frame_demo = imutils.resize(frame, width=1200)
# frame_demo = imutils.resize(frame, width = min(1200, settings.w_resize * 30)) #hy: choose a smaller window size
cv2.imshow("Demo", frame_demo)
cv2.waitKey(300)
else:
print 'no frame retrieved'
break
# hy TODO add termination condition
del (camera)
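# Condensed, illustrative sketch of the mean-shift tracking used above: build an HSV hue histogram of
# the initial ROI once, then back-project it onto a later frame and let cv2.meanShift move the window.
# Not called by the script; frames are BGR images and track_window is (x, y, w, h).
def _mean_shift_step(first_frame, next_frame, track_window):
    import cv2
    import numpy as np
    x, y, w, h = track_window
    hsv_roi = cv2.cvtColor(first_frame[y:y + h, x:x + w], cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    term_cond = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
    dst = cv2.calcBackProject([cv2.cvtColor(next_frame, cv2.COLOR_BGR2HSV)], [0], roi_hist, [0, 180], 1)
    ret, new_window = cv2.meanShift(dst, track_window, term_cond)
    return new_window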
def Evaluate_VIDEO_track_roi(VIDEO_FILE):
video = cv2.VideoCapture(VIDEO_FILE) # hy: changed from cv2.VideoCapture()
# cv2.waitKey(10)
video.set(1, 2) # hy: changed from 1,2000 which was for wheelchair test video,
# hy: propID=1 means 0-based index of the frame to be decoded/captured next
if not video.isOpened():
print "cannot find or open video file"
exit(-1)
# Read the first frame of the video
ret, frame = video.read()
# Set the ROI (Region of Interest). Actually, this is a
# rectangle of the building that we're tracking
###############################################################################################
# Track
###############################################################################################
c, r, w, h = 600, 450, 600, 600
track_window = (c, r, w, h)
# track_window = (x0, y0, w, h)
# Create mask and normalized histogram
roi = frame[r:r + h, c:c + w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_cond = (
cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1) # hy: TERM_CRITERIA_EPS - terminate iteration condition
# hy: initialization of confmatrix
confMat2_TEST_Video = np.zeros((2, 2), dtype=np.float)
video_frame_i = 0
while True:
ret, frame = video.read()
if ret:
video_frame_i = video_frame_i + 1
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
ret, track_window = cv2.meanShift(dst, track_window, term_cond)
xt, yt, wt, ht = track_window
# hy info
# print 'xt,yt,wt,ht:', xt, ',', yt, ',', wt, ',' , ht
# hy: draw rectangle as tracked window area
cv2.rectangle(frame, (xt, yt), (xt + wt, yt + ht), 255, 2)
cv2.putText(frame, 'tracked', (xt - 25, yt - 10), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 255), 2, cv2.CV_AA)
cv2.waitKey(500)
###############################################################################################
# evaluate
###############################################################################################
# hy: info
# print "shape in evaluate:x1,y1:", crop_x1, ',', crop_y1
crop_x1 = xt
crop_x2 = xt + wt
crop_y1 = yt
area_step_size = ht
crop_y2 = crop_y1 + area_step_size * settings.h_resize / settings.w_resize
# hy: select suitable values for the area of cropped frame,
# adapt to the ratio of h to w after resized, e.g. 42x42 ie.w=h
frame_crop = frame[crop_y1:crop_y2, crop_x1:crop_x2]
# hy: info
# print "shape after set_video_window:y1, y2, x1, x2:", crop_y1,',', crop_y2, ',', crop_x1, ',', crop_x2
# hy:
h_frame = frame.shape[0]
w_frame = frame.shape[1] # hy: h 1536 x w 2304
# hy: info
# print "Shape of cropped frame:", frame_crop.shape #hy: it should be same as the shape of trained images(the input image)
cv2.imshow("frame_cropped", frame_crop)
# Resize
# frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY), width=42)
# hy: it is something different to video_crop_tool.py, here for tensorflow the width has to be that of input image
frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY), width=settings.w_resize)
# hy:extra Debug
# print "Shape of cropped frame in grayscale:", frame_crop_resize_gray.shape
im = frame_crop_resize_gray # Lazy
im = np.asarray(im, np.float32)
cv2.imshow("TensorFlow Window", imutils.resize(im.astype(np.uint8), 200))
# Adding noise to the street image #TODO
# im=add_noise(im,5)
# Bluring the image to help detection #TODO
# im = cv2.GaussianBlur(im,(5,5),0)
CONF = 0.20
test_image = im
test_lables = np.zeros((1, n_classes)) # Making a dummy label to avoid errors in the initial prediction
# hy: info
# print "Image size (wxh):", im.size #hy
# Doing something very stupid here, fix it!
test_image = im.reshape((-1, im.size))
# print test_image
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
# print test_image
test_image = test_image / 255 - 0.5 # TODO this is tricky, double check with respect to the formats
batch_xs, batch_ys = test_image, test_lables
# print 'batch_xs, batch_ys:', batch_xs, ', ', batch_ys
output = sess.run(pred, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
# print("Output for external=",output)
# print output
output = tools.convert_to_confidence(output)
np.set_printoptions(precision=2)
print '\nFrame', video_frame_i
tools.print_label_title_conf()
print 'confidence =', output # hy
RES = np.argmax(output)
print "argmax =", np.argmax(output) # hy
print "label, predict =", video_label, ', ', RES # hy
if int(RES) == video_label:
label2_TEST_Video = 0
pred2_TEST_Video = 0
name_str = settings.CorrectClassified + "/frame_crop%d.jpg" % video_frame_i
tools.SAVE_CorrectClassified_frame(name_str, frame_crop, SAVE_CorrectClassified)
else:
label2_TEST_Video = 1
pred2_TEST_Video = 1
name_str = settings.Misclassified + "/frame_crop%d.jpg" % video_frame_i
tools.SAVE_Misclassified_frame(name_str, frame_crop, SAVE_Misclassified)
confMat2_TEST_Video[label2_TEST_Video, pred2_TEST_Video] = confMat2_TEST_Video[
label2_TEST_Video, pred2_TEST_Video] + 1
cv2.rectangle(frame, (crop_x1, crop_y1), (crop_x2, crop_y2), color=(0, 255, 0), thickness=1)
cv2.putText(frame, "predicted: " + LABELS[RES], org=(w_frame / 3, h_frame / 10),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2, color=(0, 255, 0), thickness=4)
cv2.putText(frame, str(video.get(1)), org=(20, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
color=(0, 255, 0), thickness=1)
frame_demo = imutils.resize(frame, width=1200)
# frame_demo = imutils.resize(frame, width = min(1200, settings.w_resize * 30)) #hy: choose a smaller window size
cv2.imshow("Demo", frame_demo)
cv2.waitKey(300)
# hy: other options - control to move ROI downwards, then to the right
# crop_y1 = crop_y1 + area_step_size/50
# if crop_y2+area_step_size >= frame.shape[0]:
# crop_y1 = 0
# crop_x1 = crop_x1 + 200
# if crop_x2+area_step_size >= frame.shape[1]:
##crop_x1 = 0
# break
else:
print 'no frame retrieved'
break # hy added
tp = confMat2_TEST_Video[0, 0]
tn = confMat2_TEST_Video[1, 1]
# print confMat2_TEST_Video
# print 'tp, tn, total number of test images:', tp, ', ', tn, ', ', tp + tn
print confMat2_TEST_Video
print 'TEST acc:', "{:.4f}".format(tp / (tp + tn))
cv2.waitKey(100)
if cv2.waitKey(1) & 0xFF == ord('q'): # hy:press key-q to quit
break
###############################################################################################
# cv2.imshow('Tracking', frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# else:
# print 'no frame received for tracking'
# break
def EVALUATE_VIDEO(VIDEO_FILE):
video = cv2.VideoCapture(VIDEO_FILE) # hy: changed from cv2.VideoCapture()
# cv2.waitKey(10)
video.set(1, 2) # hy: changed from 1,2000 which was for wheelchair test video,
# hy: propID=1 means 0-based index of the frame to be decoded/captured next
# video.open(VIDEO_FILE)
# hy: for debug
if not video.isOpened():
print "cannot find or open video file"
exit(-1)
## Reading the video file frame by frame
# hy: initialization of confmatrix
confMat2_TEST_Video = np.zeros((2, 2), dtype=np.float)
video_frame_i = 0
while True:
video_frame_i += 1
ret, frame = video.read()
if ret:
# hy:
h_frame = frame.shape[0]
w_frame = frame.shape[1] # hy: h 1536 x w 2304
# hy: info
# print "h_video and w_video", h_resize, ",", w_resize
# cv2.imshow("ori", frame)
# print "frame size hxw", frame.shape[0]," ", frame.shape[1]
crop_x2 = crop_x1 + area_step_size
# crop_y2 = (crop_y1 + (crop_x2 - crop_x1)) * settings.h_resize / settings.w_resize
crop_y2 = crop_y1 + area_step_size * settings.h_resize / settings.w_resize
# Crop
# frame_crop = frame[350:750, 610:1300] #hy: ori setting for w24xh42
# hy: select suitable values for the area of cropped frame,
# adapt to the ratio of h to w after resized, e.g. 42x42 ie.w=h
frame_crop = frame[crop_y1:crop_y2, crop_x1:crop_x2]
# hy: info
print "shape:y1,y2,x1,x2:", crop_y1, ", ", crop_y2, ", ", crop_x1, ", ", crop_x2
# print "Shape of cropped frame:", frame_crop.shape #hy: it should be same as the shape of trained images(the input image)
cv2.imshow("frame_cropped", frame_crop)
# Resize
# frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY), width=42)
# hy: it is something different to video_crop_tool.py, here for tensorflow the width has to be that of input image
frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY), width=settings.w_resize)
# hy:extra Debug
# print "Shape of cropped frame in grayscale:", frame_crop_resize_gray.shape
im = frame_crop_resize_gray # Lazy
im = np.asarray(im, np.float32)
cv2.imshow("TensorFlow Window", imutils.resize(im.astype(np.uint8), 227))
# Adding noise to the street image #TODO
# im=add_noise(im,5)
# Bluring the image to help detection #TODO
# im = cv2.GaussianBlur(im,(5,5),0)
CONF = 0.20
test_image = im
test_lables = np.zeros((1, n_classes)) # Making a dummy label to avoid errors in the initial prediction
# hy: info
# print "Image size (wxh):", im.size #hy
# Doing something very stupid here, fix it!
test_image = im.reshape((-1, im.size))
# print test_image
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
# print test_image
test_image = test_image / 255 - 0.5 # TODO this is tricky, double check with respect to the formats
batch_xs, batch_ys = test_image, test_lables
# print 'batch_xs, batch_ys:', batch_xs, ', ', batch_ys
output = sess.run(pred, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
# print("Output for external=",output)
# print output
output = tools.convert_to_confidence(output)
np.set_printoptions(precision=2)
print '\nFrame', video_frame_i
tools.print_label_title_conf()
print 'confidence =', output # hy
RES = np.argmax(output)
print "argmax =", np.argmax(output) # hy
label_pred_str = LABELS[RES][:-1]
# hy: qfor sub-classes
# label_pred_str, label_pred_num = tools.convert_result(RES) # hy use it when sub-classes are applied
# RES_sub_to_face = class_label #hy added
print "label, predict =", video_label, ', ', RES # hy
if label_pred_str == video_label:
label2_TEST_Video = 0
pred2_TEST_Video = 0
name_str = settings.CorrectClassified + "/frame_crop%d.jpg" % video_frame_i
tools.SAVE_CorrectClassified_frame(name_str, frame_crop, SAVE_CorrectClassified)
else:
label2_TEST_Video = 1
pred2_TEST_Video = 1
name_str = settings.Misclassified + "/frame_crop%d.jpg" % video_frame_i
tools.SAVE_Misclassified_frame(name_str, frame_crop, SAVE_Misclassified)
confMat2_TEST_Video[label2_TEST_Video, pred2_TEST_Video] = confMat2_TEST_Video[
label2_TEST_Video, pred2_TEST_Video] + 1
# Make a little demonstration (hy:static window version)
# hy: showing evaluation result identified class on video
# if RES == 0 or RES == 2:
# cv2.rectangle(frame,(610, 350), (1300, 750), color=(0, 255, 0), thickness=20)
# cv2.putText(frame, 'Available', org=(800, 300), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2, color=(0,255,0),thickness=4)
# else:
# cv2.rectangle(frame,(610, 350), (1300, 750), color=(0, 0, 255), thickness=20)
# cv2.putText(frame, 'Occupied', org=(800, 300), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2, color=(0,0,255), thickness=4)
# hy: TODO adapt to current app
# if RES == 0 or RES == 2:
# cv2.rectangle(frame, (crop_x1, crop_y1), (crop_x2, crop_y2), color=(0, 255, 0), thickness=20)
# cv2.putText(frame, 'Available', org=(800, 300), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2,
# color=(0, 255, 0), thickness=4)
# else:
# cv2.rectangle(frame, (crop_x1, crop_y1), (crop_x2, crop_y2), color=(0, 0, 255), thickness=20)
# cv2.putText(frame, 'Occupied', org=(800, 300), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2,
# color=(0, 0, 255), thickness=4)
cv2.rectangle(frame, (crop_x1, crop_y1), (crop_x2, crop_y2), color=(0, 255, 0), thickness=1)
cv2.putText(frame, "predicted: " + label_pred_str, org=(w_frame / 3, h_frame / 10),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2, color=(0, 255, 0), thickness=4)
# hy: could be modified to display desired label
# cv2.putText(frame, label_pred_str, org=(800, 100), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255,0,0), thickness=3 )
cv2.putText(frame, str(video.get(1)), org=(20, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
color=(0, 255, 0), thickness=1)
# cv2.putText(frame, label_pred_str, org=(20, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
# color=(0, 255, 0), thickness=1)
frame_demo = imutils.resize(frame, width=1200)
# frame_demo = imutils.resize(frame, width = min(1200, settings.w_resize * 30)) #hy: choose a smaller window size
cv2.imshow("Demo", frame_demo)
cv2.waitKey(300)
# hy: other options - control to move ROI downwards, then to the right
# crop_y1 = crop_y1 + area_step_size/50
# if crop_y2+area_step_size >= frame.shape[0]:
# crop_y1 = 0
# crop_x1 = crop_x1 + 200
# if crop_x2+area_step_size >= frame.shape[1]:
##crop_x1 = 0
# break
else:
print 'no frame retrieved'
break # hy added
tp = confMat2_TEST_Video[0, 0]
tn = confMat2_TEST_Video[1, 1]
# print confMat2_TEST_Video
# print 'tp, tn, total number of test images:', tp, ', ', tn, ', ', tp + tn
print confMat2_TEST_Video
print 'TEST acc:', "{:.4f}".format(tp / (tp + tn))
if cv2.waitKey(1) & 0xFF == ord('q'): # hy:press key-q to quit
break
def confusion_matrix(labels_onehot, scores, normalized=True):
n_samples, n_class = scores.shape
print 'n_samples for validation:', n_samples
conf_matrix = np.zeros((n_class, n_class), dtype=np.float32)
conf_matrix_2 = np.zeros((2, 2), dtype=np.float32)
for i in range(0, n_samples):
label = np.argmax(labels_onehot[i, :])
predict = np.argmax(scores[i, :])
# hy: INFO - print label, predict
# print 'labels_onehot:', labels_onehot[i, :], ' label=', label
# print 'score:', scores[i, :]
# print 'predict:', predict
conf_matrix[label, predict] = conf_matrix[label, predict] + 1
# Mapping labels
'''
if label == 0 or label == 2:
label2 = 0
else:
label2 = 1
if predict == 0 or predict == 2:
predict2 = 0
else:
predict2 = 1
'''
#################################################################################################################
# hy: adapt to lego classes
# hy: use it count corrected predict
# print label2, predict2
if label == predict: # hy: true positive
# hy: conf_matrix_2 true positive index 0,0
label2 = 0
predict2 = 0
else:
# hy: conf_matrix_2 true positive index 1,1
label2 = 1
predict2 = 1
#################################################################################################################
conf_matrix_2[label2, predict2] = conf_matrix_2[label2, predict2] + 1.0
# hy: confusion matrix
# [ tp fn]
# [ fp tn]
# tp: count label=predict / total
# tn: label!=predict
# fp: 1-tp
# fn: 1-tn
if normalized:
for i in range(0, n_class):
conf_matrix[i, :] = conf_matrix[i, :] / np.sum(conf_matrix[i, :])
return conf_matrix, conf_matrix_2
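# Tiny usage sketch for confusion_matrix() above (illustrative only): three samples, two classes, the
# last prediction is wrong. conf_matrix is row-normalized per true class; conf_matrix_2 accumulates
# correct predictions at [0, 0] and misclassifications at [1, 1].
def _confusion_matrix_example():
    import numpy as np
    labels_onehot = np.array([[1, 0], [0, 1], [0, 1]], dtype=np.float32)
    scores = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]], dtype=np.float32)
    return confusion_matrix(labels_onehot, scores, normalized=True)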
# def dense_to_one_hot(labels_dense, num_classes=n_classes):
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
if DEBUG == 1:
print 'one_hot_vector:', labels_one_hot[0]
return labels_one_hot
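# Quick illustration of dense_to_one_hot() (not used by the training code itself; assumes the script's
# DEBUG flag is defined as usual): labels [0, 2, 1] with num_classes=3 map to rows of a permuted identity.
def _one_hot_example():
    import numpy as np
    return dense_to_one_hot(np.array([0, 2, 1]), 3)  # [[1,0,0],[0,0,1],[0,1,0]]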
# Implementing softmax function on the DL output scores, adopted only for 2 classes
# hy: for final output layer using softmax classification
def convert_to_confidence(scores):
h, w = scores.shape
output = np.zeros((h, w), dtype=np.float32)
sum = np.zeros((h, 1), dtype=np.float32)
# if sum != 0:
for i in range(0, w):
sum[:, 0] = sum[:, 0] + np.exp(scores[:, i])
# print 'sum i =', sum[:, 0]
for i in range(0, w):
# print 'sum out =', sum[:, 0]
output[:, i] = np.exp(scores[:, i]) / sum[:, 0]
# class0=math.exp(scores[0,0])/(math.exp(scores[0,1])+math.exp(scores[0,0]))
# class1=math.exp(scores[0,1])/(math.exp(scores[0,1])+math.exp(scores[0,0]))
# output=[class0, class1]
# else:
# print 'sum is 0'
return output
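# convert_to_confidence() exponentiates the raw scores directly, which can overflow for large logits.
# A numerically stable softmax variant (illustrative sketch, not wired into the script) subtracts the
# row-wise maximum first; mathematically the result is identical.
def _softmax_stable(scores):
    import numpy as np
    shifted = scores - np.max(scores, axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / np.sum(e, axis=1, keepdims=True)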
# Adds noise to gray level images, nomalizes the image again
def add_noise(img, noise_level):
img = img.astype(np.float32)
h = img.shape[0]
w = img.shape[1]
img_noised = img + np.random.rand(h, w) * noise_level
img_noised = (img_noised / np.max(img_noised)) * 255
# img_noised=img_noised.astype(np.uint8)
return img_noised
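# Small usage sketch for add_noise() (illustrative only): noise_level sets the amplitude of the uniform
# noise added before the image is rescaled so that its maximum is 255 again.
def _add_noise_example():
    import numpy as np
    img = np.full((4, 4), 128, dtype=np.uint8)
    return add_noise(img, noise_level=5)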
'''
# Create model
def conv2d(img, w, b, k):
return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, k, k, 1], padding='SAME'), b))
def max_pool(img, k):
return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
'''
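# Runnable version of the commented-out layer helpers above (a sketch only; it assumes the old-style
# tf.nn API used elsewhere in this script and is not called anywhere - the actual graph is built in
# Graph_3conv / Graph_4conv).
def _conv2d_sketch(img, w, b, k):
    import tensorflow as tf
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, k, k, 1], padding='SAME'), b))
def _max_pool_sketch(img, k):
    import tensorflow as tf
    return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')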
# remove old files in tensorflow folder
if RETRAIN:
cmd = 'rm -rf ' + tensorboard_path + '/*'
os.system(cmd)
if Graph_3conv == 1:
import Graph_3conv as g3
print 'import graph3'
n_hidden, learning_rate, dropout, dropout_1s, optimizer_type, pred, x, y, \
keep_prob, optimizer, accuracy, cost, summary_op = g3.define_model()
if Graph_3conv_same_dropout == 1:
import Graph_3conv_uni
if Graph_4conv == 1:
import Graph_4conv as g4
print 'import graph4'
n_hidden, learning_rate, dropout, dropout_1s, optimizer_type, pred, x, y, \
keep_prob, optimizer, accuracy, cost, summary_op = g4.define_model()
################################################
# hy: display jpeg image via iPython for terminal and Qt-based, web-based notebook
# Image of IPython package will cause conflict with Image for Python
# error like 'Image does not have attribute fromarray
# from cStringIO import StringIO
# from IPython.display import clear_output, Image, display
# def showarray(a, fmt='jpeg'):
# a = np.uint8(np.clip(a, 0, 255))
# f = StringIO()
# PIL.Image.fromarray(a).save(f, fmt)
# display(Image(data=f.getvalue()))
# use ipython
# img = np.float32(PIL.Image.open('../Data/tmp_rz82_d8_8.jpg'))
# showarray(img)
class Logger(object):
def __init__(self):
self.terminal = sys.stdout
from datetime import datetime
str_log = optimizer_type + str(n_hidden) + '_' + 'Rate' + str(learning_rate) + '_' + arch_str
self.log = open(datetime.now().strftime('../logs/log_%Y_%m_%d_%H_%M' + str_log + '.log'), "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
# this flush method is needed for python 3 compatibility.
# this handles the flush command by doing nothing.
# you might want to specify some extra behavior here.
pass
if log_on and (RETRAIN or CONTINUE_TRAIN or TEST_with_Video):
sys.stdout = Logger()
# hy:customized tensor model name
model_path_str = '../logs/model_' + optimizer_type + str(n_hidden) + '_h' + \
str(settings.h_resize) + '_w' + str(settings.w_resize) \
+ '_c' + str(n_classes) # hy include specs of model
##################### TRAINING ####################################
if RETRAIN or CONTINUE_TRAIN:
try:
total_images, digits, carimages, cartargets, f, val2_digits, val2_images, val2_targets, val2_f = tools.import_data()
train_size = int(total_images * TrainingProp)
print 'train size', train_size
batch_size = 200
# batch_size = int(train_size / n_classes * 2)# *2
print 'batch size', batch_size
val1_batch_xs, val1_batch_ys = digits.images[train_size + 1:total_images - 1], \
digits.target[train_size + 1:total_images - 1]
val2_batch_xs, val2_batch_ys = val2_digits.images[0:len(val2_images) - 1], \
val2_digits.target[0:len(val2_images) - 1] # hy: use calc size
except:
print 'Check that the data files were created correctly (typical numpy error: setting an array element with a sequence).'
# Launch the graph
with tf.Session() as sess:
saver = tf.train.Saver() # hy:
if RETRAIN:
# Initializing the variables
init = tf.initialize_all_variables()
sess.run(init)
if CONTINUE_TRAIN:
#sess,saver = tools.load_classifier_model(sess, '../logs/', classifier_model=classifier_model)
#'''
ckpt = tf.train.get_checkpoint_state(checkpoint_dir="../logs/")
print 'ckpt', ckpt, 'ckpt path', ckpt.model_checkpoint_path
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print "Continue to train with ", ckpt.model_checkpoint_path
else:
print 'not found model'
#'''
elapsed_time = time.time() - start_time
print 'Total elapsed time3:', "{:.2f}".format(elapsed_time), 's'
# hy: added to display all results in one graph
train_writer = tf.train.SummaryWriter(tensorboard_path + '/train', sess.graph)
validation_writer = tf.train.SummaryWriter(tensorboard_path + '/vali', sess.graph)
test_writer = tf.train.SummaryWriter(tensorboard_path + '/test', sess.graph)
# from datetime import datetime
# tensorboard_path = '../Tensorboard_data/sum107/'+str(datetime.now())+'/'
# summary_writer = tf.train.SummaryWriter(tensorboard_path, graph_def=sess.graph_def)
if RETRAIN:
step = 1
if CONTINUE_TRAIN:
step = current_step
# hy register finished class learning
acc_pre = 0
# Keep training until reach max iterations
while step < training_iters and not set_STOP:
# Only a part of data base is used for training, the rest is used for validation
# batch_xs, batch_ys = digits.images[0:850], digits.target[0:850]
for batch_step in xrange(int(train_size / batch_size)):
batch_xs, batch_ys = digits.images[int(batch_step * batch_size):(batch_step + 1) * batch_size - 1], \
digits.target[batch_step * batch_size:(batch_step + 1) * batch_size - 1]
print 'batch', batch_step, ', from', int(batch_step * batch_size), 'to', (batch_step + 1) * batch_size - 1
## Training ####################################################################
# hy:define training size - batch size 75% of data base size,
# training_size = int(total_images * TrainingProp)
# print 'training size:', training_size
# batch_xs, batch_ys = digits.images[0:training_size], digits.target[0:training_size]
# batch_xs, batch_ys = digits.images[851:1050], digits.target[851:1050]
# Fit training using batch data
# sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
try:
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
# Calculate batch accuracy
train_acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout_1s})
# Calculate batch loss
loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout_1s})
except:
print '\n[Hint] if an error occurs, check the data input path, the label size in settings and the input tensor size; \nthe input to the dense layer ' \
'is the product of the dimension sizes (HxWxD) of the previous conv layer and its view size, \notherwise the input tensor size must be changed'
if step % display_step == 0:
# elapsed_time = time.time() - start_time
# print 'Up to now elapsed time:', "{:.2f}".format(elapsed_time / 60), 'min'
# if step % time_display_interval == 0:
elapsed_time = time.time() - start_time
print 'Up to now elapsed time:', "{:.2f}".format(elapsed_time / 60), 'min'
print "\nIter " + str(step) + '-' + str(batch_step) + ", Minibatch Loss= " + "{:.6f}".format(
loss) + ", Training Accuracy= " \
+ "{:.4f}".format(train_acc)
# summary_str = sess.run(summary, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
# summary_writer.add_summary(summary_str, step)
## Validation ####################################################################
val1_acc = sess.run(accuracy, feed_dict={x: val1_batch_xs, y: val1_batch_ys, keep_prob: dropout_1s})
val2_acc = sess.run(accuracy, feed_dict={x: val2_batch_xs, y: val2_batch_ys, keep_prob: dropout_1s})
val2_loss = sess.run(cost, feed_dict={x: val2_batch_xs, y: val2_batch_ys, keep_prob: dropout_1s})
print "Validation accuracy=", "{:.4f}".format(val1_acc), ',' "test accuracy=", "{:.4f}".format(val2_acc)
# print("Argmax of y:", targetindex)
output = sess.run(pred, feed_dict={x: val2_batch_xs, y: val2_batch_ys, keep_prob: dropout_1s})
# print "Targets:", batch_ys
# output = tools.convert_to_confidence(output)
# hy: changed normalized=False to True
confMat, confMat2 = confusion_matrix(val2_batch_ys, output, normalized=True)
np.set_printoptions(precision=2) # hy: set display floating point, not changing real value
# print 'Iter:', str(step), ' confMat'
tools.print_label_title()
print confMat # hy: shape n_classes x n_classes
print "\nconfinksMat2"
print confMat2
# print np.sum(confMat)
# print output
# summary_str = sess.run(summary_op, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
# summary_writer.add_summary(summary_str, step)
# hy: added to display all results in one graph
train_res = sess.run(summary_op, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout_1s})
train_writer.add_summary(train_res, step)
val1_res = sess.run(summary_op, feed_dict={x: val1_batch_xs, y: val1_batch_ys, keep_prob: dropout_1s})
validation_writer.add_summary(val1_res, step)
val2_res = sess.run(summary_op, feed_dict={x: val2_batch_xs, y: val2_batch_ys, keep_prob: dropout_1s})
test_writer.add_summary(val2_res, step)
# Retrain hy: control stop
max_classes_names = []
max_classes_pres = []
test_acc_str = 'n'
name_ext = ''
sum_col = 0
sum_col_min = n_classes
for n in xrange(n_classes):
max_of_row = max(confMat[n, :])
max_of_col = max(confMat[:, n])
diagnal_pres = confMat[n, n]
if max_of_row == max_of_col and max_of_row == diagnal_pres and sum_col < n_classes:
diagnal_pres = round(diagnal_pres, 2)
sum_col = sum(confMat[:, n])
print 'sum_col:', sum_col, settings.LABEL_names[n]
if sum_col < 1.1 and diagnal_pres > 0.6:
sum_col_min = min(sum_col_min, sum_col)
max_class = settings.LABEL_short[n]
max_classes_names.append(max_class)
max_classes_pres.append(diagnal_pres)
print 'new max value', diagnal_pres, ', class', settings.LABEL_names[n], 'col_sum', sum_col
num_of_classified_classes = len(max_classes_names)
# print 'collection:',max_classes_names,',',max_classes_pres, ', num:',num_of_classified_classes, 'name_ext:',name_ext
if save_all_models == 1:
saver.save(sess, save_path=model_path_str + 'all_' + str(batch_step) + '_' + str(round(val2_acc, 2)),
global_step=step) # hy: added. It saves both all variables and GRAPH
if (num_of_classified_classes > 1) or loss < last_loss or val2_acc > last_best_test_acc:
if loss < last_loss:
last_loss = loss
if val2_acc > last_best_test_acc:
last_best_test_acc = val2_acc
test_acc_str = str(round(last_best_test_acc, 2))
# Save the model
if num_of_classified_classes > 2 and sum_col_min < 1.01 and val2_acc > last_best_test_acc - 0.001 \
and loss < 0.09 and val2_acc > 0.7:
for p in xrange(num_of_classified_classes):
name_ext += '_' + max_classes_names[p] + str(max_classes_pres[p])
name_ext += '_' + str(batch_step) + '_' + str(round(val2_acc, 2))
print 'save model', name_ext
# saver.save(sess, save_path=model_path_str + '_I', global_step=step) # hy: it only saves variables
saver.save(sess, save_path=model_path_str + '_' + str(batch_step) + '_' + arch_str + name_ext,
global_step=step) # hy: added. It saves GRAPH
cmd = 'mv ../logs/model*' + arch_str + '* ' + tensor_model_sum_path
os.system(cmd)
cmd = 'rm ../logs/model*'
os.system(cmd)
if val2_acc > 0.2 and (float(val2_loss / loss) > stop_loss
or float(train_acc / val2_acc) > stop_acc_diff) \
or float(loss / last_loss) > stop_train_loss_increase_rate:
if float(val2_loss / loss) > stop_loss:
print 'Overfitting: loss gap'
if float(train_acc / val2_acc) > stop_acc_diff:
print 'Training will be terminated because of overfitting.'
if float(loss / last_loss) > stop_train_loss_increase_rate:
print 'Training will be terminated because of increasing loss'
set_STOP = True
val2_acc = 1
imgNum = len([name for name in os.listdir(settings.data + settings.LABELS[0]) if
os.path.isfile(os.path.join(settings.data + settings.LABELS[0], name))])
# if (acc - val2_acc) > 0.1 and imgNum < 3* settings.maxNumSaveFiles: #hy: activate random rotation
if val2_acc > act_min and val2_acc < act_max and imgNum < 2.3 * settings.maxNumSaveFiles: # hy: activate random rotation
# rotation_angle = np.random.rand(0, 180) #hy: not working
rotation_angle = randint(15, 170)
noise_level = 0.01 * randint(1, 2)
if imgNum > 2 * settings.maxNumSaveFiles:
tools.REMOVE_online_Data(step)
prep_image.rotateflipImg(rotation_angle, 0, noise_level, step) # hy: angle,flipX,noise_level,step
add_data = 1
# training_size = int(total_images * TrainingProp)
# batch_xs, batch_ys = digits.images[0:training_size], digits.target[0:training_size]
'''
#hy try to adjust learning rate automatically, unsupervised learning
if acc < acc_pre * 0.7:
learning_rate = learning_rate * 1.1
print 'increase learning_rate:', learning_rate
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys,
keep_prob: dropout}) # hy should not restart here, not interative any more
elif acc_pre <> 0 and acc > 2.6 * acc_pre:
learning_rate = learning_rate * 0.1
print 'reduce learning_rate:', learning_rate
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys,
keep_prob: dropout}) # hy should not restart here, not interative any more
acc_pre = acc
'''
if add_data == 1:
print 'update data list'
tools.prepare_list(LABEL_LIST, LABEL_PATH) # hy: update file_list
total_images, digits, carimages, cartargets, f, val2_digits, val2_images, val2_targets, val2_f = tools.import_data()
training_size = int(total_images * TrainingProp)
# total_images = len(carimages)
# hy:define training size - batch size 75% of data base size,
# batch_xs, batch_ys = digits.images[0:training_size], digits.target[0:training_size]
# cannot change add_data to 0
step += 10
print "\nOptimization Finished!"
#####################################################################################################
################## TEST with Video ###########################
#####################################################################################################
if TEST_with_Video:
with tf.Session() as sess:
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(checkpoint_dir="")
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print "Evaluation with video, model", ckpt.model_checkpoint_path
else:
print 'not found model'
print 'Test with video starting ...'
# for video_index in xrange(1):
video_list = ['hinten/', 'links/', 'oben/', 'rechts/', 'unten/', 'vorn/']
for video_index in xrange(len(video_list)):
TestFace = settings.LABELS[0][
:-1] # only one 'vorn' #'hinten' # full, 0 hinten, 1 links, 2 oben, 3 rechts, 4 unten, 5 vorn,
# TestFace = video_list[video_index][:-1] # all # full, 0 hinten, 1 links, 2 oben, 3 rechts, 4 unten, 5 vorn,
print 'Test face:', TestFace
# TestFace = settings.LABELS[video_index][:-1] #'vorn' #'hinten' # full, 0 hinten, 1 links, 2 oben, 3 rechts, 4 unten, 5 vorn,
VIDEO_FILE, crop_x1, crop_y1, area_step_size, video_label = tools.set_video_window(TestFace, video_window_scale)
# hy: info
# print "shape after set_video_window:x1,y1:", crop_x1, ", ", crop_y1
# track_frame = track_roi(VIDEO_FILE)
# Evaluate_VIDEO_track_roi(VIDEO_FILE)
EVALUATE_VIDEO(VIDEO_FILE)
print 'test face:', TestFace, 'done\n'
# TestFace = 'vorn'
# VIDEO_FILE, crop_x1, crop_y1, area_step_size, video_label = set_video_window(TestFace, video_window_scale)
# EVALUATE_VIDEO(VIDEO_FILE)
# print 'test face:', TestFace, 'done\n'
# hy: another option - automatically move ROI downwards, then to the right
# crop_y1 = crop_y1 + area_step_size/50
# if crop_y2+area_step_size >= frame.shape[0]:
# crop_y1 = 0
# crop_x1 = crop_x1 + 200
# if crop_x2+area_step_size >= frame.shape[1]:
##crop_x1 = 0
# break
#####################################################################################################
##hy: ################ TEST with IMAGES (eva) #######################
#####################################################################################################
init = tf.initialize_all_variables() # hy
if TEST_with_Images:
# hy: use a previous model
# hy: load model at checkpoint
# method 1
with tf.Session() as sess:
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(checkpoint_dir="")
if ckpt and ckpt.model_checkpoint_path:
print 'Test with images with', ckpt.model_checkpoint_path
saver.restore(sess, ckpt.model_checkpoint_path)
print 'Test list for image test', LABEL_LIST_TEST, 'labels', LABELS
confMat1_TEST_i, count_labels, confMat3, class_probability = EVALUATE_IMAGES(sess, 6, LABEL_LIST_TEST, LABELS)
# filename = ".".join([tf.latest_checkpoint('/tmp/my-tensor-model.meta'), "meta"])
# tf.train.import_meta_graph(filename)
# hparams = tf.get_collection("hparams")
# print 'hyper parameters:', hparams
print 'Count classified in each class for detailed analysis'
tools.print_label_title()
print confMat1_TEST_i
######################################################################################
######################################################################################
# https://github.com/tensorflow/tensorflow/issues/3270 load two models
# hy option2
# EVALUATE_IMAGES_VAGUE()
#####################################################################################################
##hy: ################ Test with Webcam #######################
#####################################################################################################
if TEST_with_Webcam:
with tf.Session() as sess:
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(checkpoint_dir="")
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print "Evaluation live frames with", ckpt.model_checkpoint_path
else:
print 'not found model'
print 'Test with Webcam starting ...'
# Camera 0 is the integrated web cam on my netbook
camera_port = 0
# EVALUATE_WITH_WEBCAM_track_roi(camera_port)
EVALUATE_WITH_WEBCAM(camera_port, False)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
## TEST with WEBCAM END
cv2.waitKey(0)
cv2.destroyAllWindows()
# hy:total time
#####################################################################################################
##hy: ################ Test End #######################
#####################################################################################################
elapsed_time = time.time() - start_time
print 'Total elapsed time:', "{:.2f}".format(elapsed_time / 60), 'min'
# TODO correcting some of the samples, sometimes the window is a bit large
# TODO Consider bigger images for training, details of a car are not clear in small images
# check at first place if you read images correctly, that incorrecr PIL image that appears at the beginning
# check if 0 is nocar or 1 is nocar
# TODO adding noise can help detection, it can also show power of deep learning as compared to other approaches
# TODO adding noise can show power of deep learning as compared to other approaches
# TODO check above again for making sure
# TODO check print of images for /255 and other numerical compatibility
# TODO check making fake bigger images of the database and see if it works
# TODO check if size of the cars in the street images are appropriate
# TODO try another street image
# TODO adding batch processing ..., researching and reading about batch processing ...
# TODO Histogram normalization or making sure that colors are similar
# TODO change it to correct batch mode, but not Tensorflow batch
# TODO add more negative and better negative examples
# TODO make imbalance between negative and positive samples
# TODO consider confidence measure
# TODO blur images!
# TODO Merge rectangle, use aspect ratio to remove false alarms
# TODO use density of detections in order to remove false alarms
# TODO merge rectangles
# TODO use video cues
# TODO Use a few trained network in parallel, they can only be different in terms of initialization, then vote, it significantly reduces false alarms
# Cars are always correctly detectd, but background detection changes ...
# TODO Save models, with a good name
| apache-2.0 |
gVallverdu/pymatgen | pymatgen/io/gaussian.py | 2 | 59683 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements input and output processing from Gaussian.
"""
import re
import numpy as np
import warnings
from pymatgen.core.operations import SymmOp
from pymatgen import Element, Molecule, Composition
from monty.io import zopen
from pymatgen.core.units import Ha_to_eV
from pymatgen.util.coord import get_angle
import scipy.constants as cst
from pymatgen.electronic_structure.core import Spin
__author__ = 'Shyue Ping Ong, Germain Salvato-Vallverdu, Xin Chen'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = '[email protected]'
__date__ = '8/1/15'
float_patt = re.compile(r"\s*([+-]?\d+\.\d+)")
def read_route_line(route):
"""
read route line in gaussian input/output and return functional basis_set
and a dictionary of other route parameters
Args:
route (str) : the route line
return
functional (str) : the method (HF, PBE ...)
basis_set (str) : the basis set
route (dict) : dictionary of parameters
"""
scrf_patt = re.compile(r"^([sS][cC][rR][fF])\s*=\s*(.+)")
multi_params_patt = re.compile(r"^([A-z]+[0-9]*)[\s=]+\((.*)\)$")
functional = None
basis_set = None
route_params = {}
dieze_tag = None
if route:
if "/" in route:
tok = route.split("/")
functional = tok[0].split()[-1]
basis_set = tok[1].split()[0]
for tok in [functional, basis_set, "/"]:
route = route.replace(tok, "")
for tok in route.split():
if scrf_patt.match(tok):
m = scrf_patt.match(tok)
route_params[m.group(1)] = m.group(2)
elif tok.upper() in ["#", "#N", "#P", "#T"]:
# does not store # in route to avoid error in input
if tok == "#":
dieze_tag = "#N"
else:
dieze_tag = tok
continue
else:
m = re.match(multi_params_patt, tok.strip("#"))
if m:
pars = {}
for par in m.group(2).split(","):
p = par.split("=")
pars[p[0]] = None if len(p) == 1 else p[1]
route_params[m.group(1)] = pars
else:
d = tok.strip("#").split("=")
route_params[d[0]] = None if len(d) == 1 else d[1]
return functional, basis_set, route_params, dieze_tag
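# Illustrative usage of read_route_line (a sketch, not part of the original module). For
# route = "#P B3LYP/6-31G(d) Opt SCF=Tight" the call is expected to return roughly
# functional="B3LYP", basis_set="6-31G(d)", route_params={"Opt": None, "SCF": "Tight"}, dieze_tag="#P".
def _read_route_line_example():
    return read_route_line("#P B3LYP/6-31G(d) Opt SCF=Tight")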
class GaussianInput:
"""
An object representing a Gaussian input file.
"""
# Commonly used regex patterns
_zmat_patt = re.compile(r"^(\w+)*([\s,]+(\w+)[\s,]+(\w+))*[\-\.\s,\w]*$")
_xyz_patt = re.compile(r"^(\w+)[\s,]+([\d\.eE\-]+)[\s,]+([\d\.eE\-]+)[\s,]+"
r"([\d\.eE\-]+)[\-\.\s,\w.]*$")
def __init__(self, mol, charge=None, spin_multiplicity=None, title=None,
functional="HF", basis_set="6-31G(d)", route_parameters=None,
input_parameters=None, link0_parameters=None, dieze_tag="#P",
gen_basis=None):
"""
Args:
mol: Input molecule. It can either be a Molecule object,
                a string giving the geometry in a format supported by Gaussian,
                or ``None``. If the molecule is ``None``, you will need to
                read it in from a checkpoint. Consider adding ``CHK`` to the
``link0_parameters``.
charge: Charge of the molecule. If None, charge on molecule is used.
Defaults to None. This allows the input file to be set a
charge independently from the molecule itself.
If ``mol`` is not a Molecule object, then you must specify a charge.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons. If ``mol`` is not a Molecule object, then you
must specify the multiplicity
title: Title for run. Defaults to formula of molecule if None.
functional: Functional for run.
basis_set: Basis set for run.
route_parameters: Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
input_parameters: Additional input parameters for run as a dict. Used
for example, in PCM calculations. E.g., {"EPS":12}
link0_parameters: Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
dieze_tag: # preceding the route line. E.g. "#p"
gen_basis: allows a user-specified basis set to be used in a Gaussian
calculation. If this is not None, the attribute ``basis_set`` will
be set to "Gen".
"""
self._mol = mol
# Determine multiplicity and charge settings
if isinstance(mol, Molecule):
self.charge = charge if charge is not None else mol.charge
nelectrons = - self.charge + mol.charge + mol.nelectrons
if spin_multiplicity is not None:
self.spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(
self.charge, spin_multiplicity))
else:
self.spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
# Get a title from the molecule name
self.title = title if title else self._mol.composition.formula
else:
if charge is None or spin_multiplicity is None:
raise ValueError('`charge` and `spin_multiplicity` must be specified')
self.charge = charge
self.spin_multiplicity = spin_multiplicity
# Set a title
self.title = title if title else 'Restart'
# Store the remaining settings
self.functional = functional
self.basis_set = basis_set
self.link0_parameters = link0_parameters if link0_parameters else {}
self.route_parameters = route_parameters if route_parameters else {}
self.input_parameters = input_parameters if input_parameters else {}
self.dieze_tag = dieze_tag if dieze_tag[0] == "#" else "#" + dieze_tag
self.gen_basis = gen_basis
if gen_basis is not None:
self.basis_set = "Gen"
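    # Illustrative usage of GaussianInput (a minimal sketch; the water geometry
    # below is made up for the example and is not taken from this module):
    #   mol = Molecule(["O", "H", "H"],
    #                  [[0.0, 0.0, 0.0], [0.0, 0.757, 0.587], [0.0, -0.757, 0.587]])
    #   gin = GaussianInput(mol, charge=0, spin_multiplicity=1,
    #                       functional="B3LYP", basis_set="6-31G(d)",
    #                       route_parameters={"Opt": None, "SCF": "Tight"})
    #   gin.write_file("water.com", cart_coords=True)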
@property
def molecule(self):
"""
Returns molecule associated with this GaussianInput.
"""
return self._mol
@staticmethod
def _parse_coords(coord_lines):
"""
Helper method to parse coordinates.
"""
paras = {}
var_pattern = re.compile(r"^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$")
for l in coord_lines:
m = var_pattern.match(l.strip())
if m:
paras[m.group(1).strip("=")] = float(m.group(2))
species = []
coords = []
# Stores whether a Zmatrix format is detected. Once a zmatrix format
        # is detected, it is assumed for the remainder of the parsing.
zmode = False
for l in coord_lines:
l = l.strip()
if not l:
break
if (not zmode) and GaussianInput._xyz_patt.match(l):
m = GaussianInput._xyz_patt.match(l)
species.append(m.group(1))
toks = re.split(r"[,\s]+", l.strip())
if len(toks) > 4:
coords.append([float(i) for i in toks[2:5]])
else:
coords.append([float(i) for i in toks[1:4]])
elif GaussianInput._zmat_patt.match(l):
zmode = True
toks = re.split(r"[,\s]+", l.strip())
species.append(toks[0])
toks.pop(0)
if len(toks) == 0:
coords.append(np.array([0, 0, 0]))
else:
nn = []
parameters = []
while len(toks) > 1:
ind = toks.pop(0)
data = toks.pop(0)
try:
nn.append(int(ind))
except ValueError:
nn.append(species.index(ind) + 1)
try:
val = float(data)
parameters.append(val)
except ValueError:
if data.startswith("-"):
parameters.append(-paras[data[1:]])
else:
parameters.append(paras[data])
if len(nn) == 1:
coords.append(np.array([0, 0, parameters[0]]))
elif len(nn) == 2:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
bl = parameters[0]
angle = parameters[1]
axis = [0, 1, 0]
op = SymmOp.from_origin_axis_angle(coords1, axis,
angle, False)
coord = op.operate(coords2)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
elif len(nn) == 3:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
coords3 = coords[nn[2] - 1]
bl = parameters[0]
angle = parameters[1]
dih = parameters[2]
v1 = coords3 - coords2
v2 = coords1 - coords2
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(
coords1, axis, angle, False)
coord = op.operate(coords2)
v1 = coord - coords1
v2 = coords1 - coords2
v3 = np.cross(v1, v2)
adj = get_angle(v3, axis)
axis = coords1 - coords2
op = SymmOp.from_origin_axis_angle(
coords1, axis, dih - adj, False)
coord = op.operate(coord)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
def _parse_species(sp_str):
"""
The species specification can take many forms. E.g.,
simple integers representing atomic numbers ("8"),
actual species string ("C") or a labelled species ("C1").
Sometimes, the species string is also not properly capitalized,
            e.g., ("c1"). This method should take care of these known formats.
"""
try:
return int(sp_str)
except ValueError:
sp = re.sub(r"\d", "", sp_str)
return sp.capitalize()
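        # Hand-traced examples of _parse_species: "8" -> 8, "C" -> "C", "c1" -> "C".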
species = [_parse_species(sp) for sp in species]
return Molecule(species, coords)
@staticmethod
def from_string(contents):
"""
Creates GaussianInput from a string.
Args:
            contents: String representing a Gaussian input file.
Returns:
GaussianInput object
"""
lines = [l.strip() for l in contents.split("\n")]
link0_patt = re.compile(r"^(%.+)\s*=\s*(.+)")
link0_dict = {}
for i, l in enumerate(lines):
if link0_patt.match(l):
m = link0_patt.match(l)
link0_dict[m.group(1).strip("=")] = m.group(2)
route_patt = re.compile(r"^#[sSpPnN]*.*")
route = ""
route_index = None
for i, l in enumerate(lines):
if route_patt.match(l):
route += " " + l
route_index = i
# This condition allows for route cards spanning multiple lines
elif (l == "" or l.isspace()) and route_index:
break
functional, basis_set, route_paras, dieze_tag = read_route_line(route)
ind = 2
title = []
while lines[route_index + ind].strip():
title.append(lines[route_index + ind].strip())
ind += 1
title = ' '.join(title)
ind += 1
toks = re.split(r"[,\s]+", lines[route_index + ind])
charge = int(float(toks[0]))
spin_mult = int(toks[1])
coord_lines = []
spaces = 0
input_paras = {}
ind += 1
for i in range(route_index + ind, len(lines)):
if lines[i].strip() == "":
spaces += 1
if spaces >= 2:
d = lines[i].split("=")
if len(d) == 2:
input_paras[d[0]] = d[1]
else:
coord_lines.append(lines[i].strip())
mol = GaussianInput._parse_coords(coord_lines)
mol.set_charge_and_spin(charge, spin_mult)
return GaussianInput(mol, charge=charge, spin_multiplicity=spin_mult,
title=title, functional=functional,
basis_set=basis_set,
route_parameters=route_paras,
input_parameters=input_paras,
link0_parameters=link0_dict,
dieze_tag=dieze_tag)
@staticmethod
def from_file(filename):
"""
Creates GaussianInput from a file.
Args:
filename: Gaussian input filename
Returns:
GaussianInput object
"""
with zopen(filename, "r") as f:
return GaussianInput.from_string(f.read())
def _find_nn_pos_before_site(self, siteindex):
"""
Returns index of nearest neighbor atoms.
"""
alldist = [(self._mol.get_distance(siteindex, i), i)
for i in range(siteindex)]
alldist = sorted(alldist, key=lambda x: x[0])
return [d[1] for d in alldist]
def get_zmatrix(self):
"""
Returns a z-matrix representation of the molecule.
"""
output = []
outputvar = []
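        # The loop below emits z-matrix lines such as (illustrative values only):
        #   O
        #   H 1 B1
        #   H 1 B2 2 A2
        # followed by a variable section like B1=0.957000, A2=104.500000.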
for i, site in enumerate(self._mol):
if i == 0:
output.append("{}".format(site.specie))
elif i == 1:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
output.append("{} {} B{}".format(self._mol[i].specie,
nn[0] + 1, i))
outputvar.append("B{}={:.6f}".format(i, bondlength))
elif i == 2:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
output.append("{} {} B{} {} A{}".format(self._mol[i].specie,
nn[0] + 1, i,
nn[1] + 1, i))
outputvar.append("B{}={:.6f}".format(i, bondlength))
outputvar.append("A{}={:.6f}".format(i, angle))
else:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
dih = self._mol.get_dihedral(i, nn[0], nn[1], nn[2])
output.append("{} {} B{} {} A{} {} D{}"
.format(self._mol[i].specie, nn[0] + 1, i,
nn[1] + 1, i, nn[2] + 1, i))
outputvar.append("B{}={:.6f}".format(i, bondlength))
outputvar.append("A{}={:.6f}".format(i, angle))
outputvar.append("D{}={:.6f}".format(i, dih))
return "\n".join(output) + "\n\n" + "\n".join(outputvar)
def get_cart_coords(self):
"""
Return the cartesian coordinates of the molecule
"""
def to_s(x):
return "%0.6f" % x
outs = []
for i, site in enumerate(self._mol):
outs.append(" ".join([site.species_string,
" ".join([to_s(j) for j in site.coords])]))
return "\n".join(outs)
def __str__(self):
return self.to_string()
def to_string(self, cart_coords=False):
"""
Return GaussianInput string
        Option: when cart_coords is set to True, return the cartesian coordinates
        instead of the z-matrix
"""
def para_dict_to_string(para, joiner=" "):
para_str = []
# sorted is only done to make unittests work reliably
for par, val in sorted(para.items()):
if val is None or val == "":
para_str.append(par)
elif isinstance(val, dict):
val_str = para_dict_to_string(val, joiner=",")
para_str.append("{}=({})".format(par, val_str))
else:
para_str.append("{}={}".format(par, val))
return joiner.join(para_str)
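        # Hand-traced example of the helper above (keys are sorted):
        #   para_dict_to_string({"SCF": "Tight", "Opt": None}) -> "Opt SCF=Tight"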
output = []
if self.link0_parameters:
output.append(para_dict_to_string(self.link0_parameters, "\n"))
output.append("{diez} {func}/{bset} {route}"
.format(diez=self.dieze_tag, func=self.functional,
bset=self.basis_set,
route=para_dict_to_string(self.route_parameters))
)
output.append("")
output.append(self.title)
output.append("")
output.append("%d %d" % (self.charge, self.spin_multiplicity))
if isinstance(self._mol, Molecule):
if cart_coords is True:
output.append(self.get_cart_coords())
else:
output.append(self.get_zmatrix())
elif self._mol is not None:
output.append(str(self._mol))
output.append("")
if self.gen_basis is not None:
output.append("{:s}\n".format(self.gen_basis))
output.append(para_dict_to_string(self.input_parameters, "\n"))
output.append("\n")
return "\n".join(output)
def write_file(self, filename, cart_coords=False):
"""
Write the input string into a file
        Option: see the ``to_string`` method for the ``cart_coords`` option
"""
with zopen(filename, "w") as f:
f.write(self.to_string(cart_coords))
def as_dict(self):
"""
:return: MSONable dict
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"functional": self.functional,
"basis_set": self.basis_set,
"route_parameters": self.route_parameters,
"title": self.title,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"input_parameters": self.input_parameters,
"link0_parameters": self.link0_parameters,
"dieze_tag": self.dieze_tag}
@classmethod
def from_dict(cls, d):
"""
:param d: dict
:return: GaussianInput
"""
return GaussianInput(mol=Molecule.from_dict(d["molecule"]),
functional=d["functional"],
basis_set=d["basis_set"],
route_parameters=d["route_parameters"],
title=d["title"],
charge=d["charge"],
spin_multiplicity=d["spin_multiplicity"],
input_parameters=d["input_parameters"],
link0_parameters=d["link0_parameters"])
class GaussianOutput:
"""
Parser for Gaussian output files.
.. note::
Still in early beta.
Attributes:
.. attribute:: structures
All structures from the calculation in the standard orientation. If the
symmetry is not considered, the standard orientation is not printed out
and the input orientation is used instead. Check the `standard_orientation`
attribute.
.. attribute:: structures_input_orientation
All structures from the calculation in the input orientation.
.. attribute:: energies
All energies from the calculation.
.. attribute:: eigenvalues
List of eigenvalues for the last geometry
.. attribute:: MO_coefficients
Matrix of MO coefficients for the last geometry
.. attribute:: cart_forces
All cartesian forces from the calculation.
.. attribute:: frequencies
A list for each freq calculation and for each mode of a dict with
        {
            "frequency": freq in cm-1,
            "symmetry": symmetry tag,
            "r_mass": Reduced mass,
"f_constant": force constant,
"IR_intensity": IR Intensity,
"mode": normal mode
}
The normal mode is a 1D vector of dx, dy dz of each atom.
.. attribute:: hessian
Matrix of second derivatives of the energy with respect to cartesian
coordinates in the **input orientation** frame. Need #P in the
route section in order to be in the output.
.. attribute:: properly_terminated
True if run has properly terminated
.. attribute:: is_pcm
True if run is a PCM run.
.. attribute:: is_spin
True if it is an unrestricted run
.. attribute:: stationary_type
        If it is a relaxation run, indicates whether it is a minimum ("Minimum")
        or a saddle point ("Saddle").
.. attribute:: corrections
Thermochemical corrections if this run is a Freq run as a dict. Keys
are "Zero-point", "Thermal", "Enthalpy" and "Gibbs Free Energy"
.. attribute:: functional
Functional used in the run.
.. attribute:: basis_set
Basis set used in the run
.. attribute:: route
Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
.. attribute:: dieze_tag
# preceding the route line, e.g. "#P"
.. attribute:: link0
Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
.. attribute:: charge
Charge for structure
.. attribute:: spin_multiplicity
Spin multiplicity for structure
.. attribute:: num_basis_func
Number of basis functions in the run.
.. attribute:: electrons
number of alpha and beta electrons as (N alpha, N beta)
.. attribute:: pcm
PCM parameters and output if available.
.. attribute:: errors
error if not properly terminated (list to be completed in error_defs)
.. attribute:: Mulliken_charges
Mulliken atomic charges
.. attribute:: eigenvectors
        Matrix of shape (num_basis_func, num_basis_func). Each column is an
        eigenvector and contains the AO coefficients of an MO.
eigenvectors[Spin] = mat(num_basis_func, num_basis_func)
.. attribute:: molecular_orbital
        MO expansion coefficients on the AO basis, in a more convenient array dict
for each atom and basis set label.
mo[Spin][OM j][atom i] = {AO_k: coeff, AO_k: coeff ... }
.. attribute:: atom_basis_labels
Labels of AO for each atoms. These labels are those used in the output
of molecular orbital coefficients (POP=Full) and in the
molecular_orbital array dict.
atom_basis_labels[iatom] = [AO_k, AO_k, ...]
.. attribute:: resumes
        List of Gaussian data resumes (archive summaries) given at the end of the
        output file before the quotation. The resumes are given as strings.
.. attribute:: title
Title of the gaussian run.
.. attribute:: standard_orientation
If True, the geometries stored in the structures are in the standard
orientation. Else, the geometries are in the input orientation.
.. attribute:: bond_orders
Dict of bond order values read in the output file such as:
{(0, 1): 0.8709, (1, 6): 1.234, ...}
The keys are the atom indexes and the values are the Wiberg bond indexes
that are printed using `pop=NBOREAD` and `$nbo bndidx $end`.
Methods:
.. method:: to_input()
Return a GaussianInput object using the last geometry and the same
calculation parameters.
.. method:: read_scan()
Read a potential energy surface from a gaussian scan calculation.
.. method:: get_scan_plot()
Get a matplotlib plot of the potential energy surface
.. method:: save_scan_plot()
Save a matplotlib plot of the potential energy surface to a file
"""
def __init__(self, filename):
"""
Args:
filename: Filename of Gaussian output file.
"""
self.filename = filename
self._parse(filename)
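    # Illustrative usage of GaussianOutput (a minimal sketch; "mol.log" is a
    # placeholder filename):
    #   gout = GaussianOutput("mol.log")
    #   if gout.properly_terminated:
    #       final_mol = gout.final_structure
    #       energy_ev = gout.final_energy * Ha_to_eV
    #       gin = gout.to_input(route_parameters={"SP": None})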
@property
def final_energy(self):
"""
:return: Final energy in Gaussian output.
"""
return self.energies[-1]
@property
def final_structure(self):
"""
:return: Final structure in Gaussian output.
"""
return self.structures[-1]
def _parse(self, filename):
start_patt = re.compile(r" \(Enter \S+l101\.exe\)")
route_patt = re.compile(r" #[pPnNtT]*.*")
link0_patt = re.compile(r"^\s(%.+)\s*=\s*(.+)")
charge_mul_patt = re.compile(r"Charge\s+=\s*([-\d]+)\s+"
r"Multiplicity\s+=\s*(\d+)")
num_basis_func_patt = re.compile(r"([0-9]+)\s+basis functions")
num_elec_patt = re.compile(r"(\d+)\s+alpha electrons\s+(\d+)\s+beta electrons")
pcm_patt = re.compile(r"Polarizable Continuum Model")
stat_type_patt = re.compile(r"imaginary frequencies")
scf_patt = re.compile(r"E\(.*\)\s*=\s*([-\.\d]+)\s+")
mp2_patt = re.compile(r"EUMP2\s*=\s*(.*)")
oniom_patt = re.compile(r"ONIOM:\s+extrapolated energy\s*=\s*(.*)")
termination_patt = re.compile(r"(Normal|Error) termination")
error_patt = re.compile(
r"(! Non-Optimized Parameters !|Convergence failure)")
mulliken_patt = re.compile(
r"^\s*(Mulliken charges|Mulliken atomic charges)")
mulliken_charge_patt = re.compile(
r'^\s+(\d+)\s+([A-Z][a-z]?)\s*(\S*)')
end_mulliken_patt = re.compile(
r'(Sum of Mulliken )(.*)(charges)\s*=\s*(\D)')
std_orientation_patt = re.compile(r"Standard orientation")
input_orientation_patt = re.compile(r"Input orientation")
orbital_patt = re.compile(r"(Alpha|Beta)\s*\S+\s*eigenvalues --(.*)")
thermo_patt = re.compile(r"(Zero-point|Thermal) correction(.*)="
r"\s+([\d\.-]+)")
forces_on_patt = re.compile(
r"Center\s+Atomic\s+Forces\s+\(Hartrees/Bohr\)")
forces_off_patt = re.compile(r"Cartesian\s+Forces:\s+Max.*RMS.*")
forces_patt = re.compile(
r"\s+(\d+)\s+(\d+)\s+([0-9\.-]+)\s+([0-9\.-]+)\s+([0-9\.-]+)")
freq_on_patt = re.compile(
r"Harmonic\sfrequencies\s+\(cm\*\*-1\),\sIR\sintensities.*Raman.*")
normal_mode_patt = re.compile(
r"\s+(\d+)\s+(\d+)\s+([0-9\.-]{4,5})\s+([0-9\.-]{4,5}).*")
mo_coeff_patt = re.compile(r"Molecular Orbital Coefficients:")
mo_coeff_name_patt = re.compile(r"\d+\s((\d+|\s+)\s+([a-zA-Z]{1,2}|\s+))\s+(\d+\S+)")
hessian_patt = re.compile(r"Force constants in Cartesian coordinates:")
resume_patt = re.compile(r"^\s1\\1\\GINC-\S*")
resume_end_patt = re.compile(r"^\s.*\\\\@")
bond_order_patt = re.compile(r"Wiberg bond index matrix in the NAO basis:")
self.properly_terminated = False
self.is_pcm = False
self.stationary_type = "Minimum"
self.corrections = {}
self.energies = []
self.pcm = None
self.errors = []
self.Mulliken_charges = {}
self.link0 = {}
self.cart_forces = []
self.frequencies = []
self.eigenvalues = []
self.is_spin = False
self.hessian = None
self.resumes = []
self.title = None
self.bond_orders = {}
read_coord = 0
read_mulliken = False
read_eigen = False
eigen_txt = []
parse_stage = 0
num_basis_found = False
terminated = False
parse_forces = False
forces = []
parse_freq = False
frequencies = []
read_mo = False
parse_hessian = False
routeline = ""
standard_orientation = False
parse_bond_order = False
input_structures = list()
std_structures = list()
geom_orientation = None
with zopen(filename) as f:
for line in f:
if parse_stage == 0:
if start_patt.search(line):
parse_stage = 1
elif link0_patt.match(line):
m = link0_patt.match(line)
self.link0[m.group(1)] = m.group(2)
elif route_patt.search(line) or routeline != "":
if set(line.strip()) == {"-"}:
params = read_route_line(routeline)
self.functional = params[0]
self.basis_set = params[1]
self.route_parameters = params[2]
route_lower = {k.lower(): v
for k, v in
self.route_parameters.items()}
self.dieze_tag = params[3]
parse_stage = 1
else:
routeline += line.strip()
elif parse_stage == 1:
if set(line.strip()) == {"-"} and self.title is None:
self.title = ""
elif self.title == "":
self.title = line.strip()
elif charge_mul_patt.search(line):
m = charge_mul_patt.search(line)
self.charge = int(m.group(1))
self.spin_multiplicity = int(m.group(2))
parse_stage = 2
elif parse_stage == 2:
if self.is_pcm:
self._check_pcm(line)
if "freq" in route_lower and thermo_patt.search(line):
m = thermo_patt.search(line)
if m.group(1) == "Zero-point":
self.corrections["Zero-point"] = float(m.group(3))
else:
key = m.group(2).strip(" to ")
self.corrections[key] = float(m.group(3))
if read_coord:
[f.readline() for i in range(3)]
line = f.readline()
sp = []
coords = []
while set(line.strip()) != {"-"}:
toks = line.split()
sp.append(Element.from_Z(int(toks[1])))
coords.append([float(x) for x in toks[3:6]])
line = f.readline()
read_coord = False
if geom_orientation == "input":
input_structures.append(Molecule(sp, coords))
elif geom_orientation == "standard":
std_structures.append(Molecule(sp, coords))
if parse_forces:
m = forces_patt.search(line)
if m:
forces.extend([float(_v)
for _v in m.groups()[2:5]])
elif forces_off_patt.search(line):
self.cart_forces.append(forces)
forces = []
parse_forces = False
# read molecular orbital eigenvalues
if read_eigen:
m = orbital_patt.search(line)
if m:
eigen_txt.append(line)
else:
read_eigen = False
self.eigenvalues = {Spin.up: []}
for eigenline in eigen_txt:
if "Alpha" in eigenline:
self.eigenvalues[Spin.up] += [float(e) for e in float_patt.findall(eigenline)]
elif "Beta" in eigenline:
if Spin.down not in self.eigenvalues:
self.eigenvalues[Spin.down] = []
self.eigenvalues[Spin.down] += [float(e) for e in float_patt.findall(eigenline)]
eigen_txt = []
# read molecular orbital coefficients
if (not num_basis_found) and num_basis_func_patt.search(line):
m = num_basis_func_patt.search(line)
self.num_basis_func = int(m.group(1))
num_basis_found = True
elif read_mo:
# build a matrix with all coefficients
all_spin = [Spin.up]
if self.is_spin:
all_spin.append(Spin.down)
mat_mo = {}
for spin in all_spin:
mat_mo[spin] = np.zeros((self.num_basis_func,
self.num_basis_func))
nMO = 0
end_mo = False
while nMO < self.num_basis_func and not end_mo:
f.readline()
f.readline()
self.atom_basis_labels = []
for i in range(self.num_basis_func):
line = f.readline()
# identify atom and OA labels
m = mo_coeff_name_patt.search(line)
if m.group(1).strip() != "":
iat = int(m.group(2)) - 1
# atname = m.group(3)
self.atom_basis_labels.append([m.group(4)])
else:
self.atom_basis_labels[iat].append(m.group(4))
# MO coefficients
coeffs = [float(c) for c in
float_patt.findall(line)]
for j in range(len(coeffs)):
mat_mo[spin][i, nMO + j] = coeffs[j]
nMO += len(coeffs)
line = f.readline()
# manage pop=regular case (not all MO)
if nMO < self.num_basis_func and \
("Density Matrix:" in line or
mo_coeff_patt.search(line)):
end_mo = True
warnings.warn("POP=regular case, matrix "
"coefficients not complete")
f.readline()
self.eigenvectors = mat_mo
read_mo = False
# build a more convenient array dict with MO
# coefficient of each atom in each MO.
# mo[Spin][OM j][atom i] =
# {AO_k: coeff, AO_k: coeff ... }
mo = {}
for spin in all_spin:
mo[spin] = [[{} for iat in
range(len(self.atom_basis_labels))]
for j in range(self.num_basis_func)]
for j in range(self.num_basis_func):
i = 0
for iat in range(len(self.atom_basis_labels)):
for label in self.atom_basis_labels[iat]:
mo[spin][j][iat][label] = self.eigenvectors[spin][i, j]
i += 1
self.molecular_orbital = mo
elif parse_freq:
while line.strip() != "": # blank line
ifreqs = [int(val) - 1 for val in line.split()]
for ifreq in ifreqs:
frequencies.append({"frequency": None,
"r_mass": None,
"f_constant": None,
"IR_intensity": None,
"symmetry": None,
"mode": []})
# read freq, intensity, masses, symmetry ...
while "Atom AN" not in line:
if "Frequencies --" in line:
freqs = map(float,
float_patt.findall(line))
for ifreq, freq in zip(ifreqs, freqs):
frequencies[ifreq]["frequency"] = freq
elif "Red. masses --" in line:
r_masses = map(float,
float_patt.findall(line))
for ifreq, r_mass in zip(ifreqs, r_masses):
frequencies[ifreq]["r_mass"] = r_mass
elif "Frc consts --" in line:
f_consts = map(float,
float_patt.findall(line))
for ifreq, f_const in zip(ifreqs, f_consts):
frequencies[ifreq]["f_constant"] = f_const
elif "IR Inten --" in line:
IR_intens = map(float,
float_patt.findall(line))
for ifreq, intens in zip(ifreqs, IR_intens):
frequencies[ifreq]["IR_intensity"] = intens
else:
syms = line.split()[:3]
for ifreq, sym in zip(ifreqs, syms):
frequencies[ifreq]["symmetry"] = sym
line = f.readline()
# read normal modes
line = f.readline()
while normal_mode_patt.search(line):
values = list(map(float,
float_patt.findall(line)))
for i, ifreq in zip(range(0, len(values), 3),
ifreqs):
frequencies[ifreq]["mode"].extend(values[i:i+3])
line = f.readline()
parse_freq = False
self.frequencies.append(frequencies)
frequencies = []
elif parse_hessian:
# read Hessian matrix under "Force constants in Cartesian coordinates"
# Hessian matrix is in the input orientation framework
# WARNING : need #P in the route line
parse_hessian = False
ndf = 3 * len(input_structures[0])
self.hessian = np.zeros((ndf, ndf))
j_indices = range(5)
jndf = 0
while jndf < ndf:
for i in range(jndf, ndf):
line = f.readline()
vals = re.findall(r"\s*([+-]?\d+\.\d+[eEdD]?[+-]\d+)", line)
vals = [float(val.replace("D", "E"))
for val in vals]
for jval, val in enumerate(vals):
j = j_indices[jval]
self.hessian[i, j] = val
self.hessian[j, i] = val
jndf += len(vals)
line = f.readline()
j_indices = [j + 5 for j in j_indices]
elif parse_bond_order:
# parse Wiberg bond order
line = f.readline()
line = f.readline()
nat = len(input_structures[0])
matrix = list()
for iat in range(nat):
line = f.readline()
matrix.append([float(v) for v in line.split()[2:]])
self.bond_orders = dict()
for iat in range(nat):
for jat in range(iat + 1, nat):
self.bond_orders[(iat, jat)] = matrix[iat][jat]
parse_bond_order = False
elif termination_patt.search(line):
m = termination_patt.search(line)
if m.group(1) == "Normal":
self.properly_terminated = True
terminated = True
elif error_patt.search(line):
error_defs = {
"! Non-Optimized Parameters !": "Optimization "
"error",
"Convergence failure": "SCF convergence error"
}
m = error_patt.search(line)
self.errors.append(error_defs[m.group(1)])
elif num_elec_patt.search(line):
m = num_elec_patt.search(line)
self.electrons = (int(m.group(1)), int(m.group(2)))
elif (not self.is_pcm) and pcm_patt.search(line):
self.is_pcm = True
self.pcm = {}
elif "freq" in route_lower and "opt" in route_lower and \
stat_type_patt.search(line):
self.stationary_type = "Saddle"
elif mp2_patt.search(line):
m = mp2_patt.search(line)
self.energies.append(float(m.group(1).replace("D",
"E")))
elif oniom_patt.search(line):
                        m = oniom_patt.search(line)
self.energies.append(float(m.group(1)))
elif scf_patt.search(line):
m = scf_patt.search(line)
self.energies.append(float(m.group(1)))
elif std_orientation_patt.search(line):
standard_orientation = True
geom_orientation = "standard"
read_coord = True
elif input_orientation_patt.search(line):
geom_orientation = "input"
read_coord = True
elif not read_eigen and orbital_patt.search(line):
eigen_txt.append(line)
read_eigen = True
elif mulliken_patt.search(line):
mulliken_txt = []
read_mulliken = True
elif not parse_forces and forces_on_patt.search(line):
parse_forces = True
elif freq_on_patt.search(line):
parse_freq = True
[f.readline() for i in range(3)]
elif mo_coeff_patt.search(line):
if "Alpha" in line:
self.is_spin = True
read_mo = True
elif hessian_patt.search(line):
parse_hessian = True
elif resume_patt.search(line):
resume = []
while not resume_end_patt.search(line):
resume.append(line)
line = f.readline()
# security if \\@ not in one line !
if line == "\n":
break
resume.append(line)
resume = "".join([r.strip() for r in resume])
self.resumes.append(resume)
elif bond_order_patt.search(line):
parse_bond_order = True
if read_mulliken:
if not end_mulliken_patt.search(line):
mulliken_txt.append(line)
else:
m = end_mulliken_patt.search(line)
mulliken_charges = {}
for line in mulliken_txt:
if mulliken_charge_patt.search(line):
m = mulliken_charge_patt.search(line)
dic = {int(m.group(1)):
[m.group(2), float(m.group(3))]}
mulliken_charges.update(dic)
read_mulliken = False
self.Mulliken_charges = mulliken_charges
# store the structures. If symmetry is considered, the standard orientation
# is used. Else the input orientation is used.
if standard_orientation:
self.structures = std_structures
self.structures_input_orientation = input_structures
else:
self.structures = input_structures
self.structures_input_orientation = input_structures
if not terminated:
warnings.warn("\n" + self.filename +
": Termination error or bad Gaussian output file !")
def _check_pcm(self, line):
energy_patt = re.compile(r"(Dispersion|Cavitation|Repulsion) energy"
r"\s+\S+\s+=\s+(\S*)")
total_patt = re.compile(r"with all non electrostatic terms\s+\S+\s+"
r"=\s+(\S*)")
parameter_patt = re.compile(r"(Eps|Numeral density|RSolv|Eps"
r"\(inf[inity]*\))\s+=\s*(\S*)")
if energy_patt.search(line):
m = energy_patt.search(line)
self.pcm['{} energy'.format(m.group(1))] = float(m.group(2))
elif total_patt.search(line):
m = total_patt.search(line)
self.pcm['Total energy'] = float(m.group(1))
elif parameter_patt.search(line):
m = parameter_patt.search(line)
self.pcm[m.group(1)] = float(m.group(2))
def as_dict(self):
"""
Json-serializable dict representation.
"""
structure = self.final_structure
d = {"has_gaussian_completed": self.properly_terminated,
"nsites": len(structure)}
comp = structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
d["is_pcm"] = self.is_pcm
d["errors"] = self.errors
d["Mulliken_charges"] = self.Mulliken_charges
unique_symbols = sorted(list(d["unit_cell_formula"].keys()))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["charge"] = self.charge
d["spin_multiplicity"] = self.spin_multiplicity
vin = {"route": self.route_parameters, "functional": self.functional,
"basis_set": self.basis_set,
"nbasisfunctions": self.num_basis_func,
"pcm_parameters": self.pcm}
d["input"] = vin
nsites = len(self.final_structure)
vout = {
"energies": self.energies,
"final_energy": self.final_energy,
"final_energy_per_atom": self.final_energy / nsites,
"molecule": structure.as_dict(),
"stationary_type": self.stationary_type,
"corrections": self.corrections
}
d['output'] = vout
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
def read_scan(self):
"""
        Read a potential energy surface from a Gaussian scan calculation.
        Returns:
            A dict: {"energies": [ values ],
                     "coords": {"d1": [ values ], "A2": [ values ], ... }}
"energies" are the energies of all points of the potential energy
surface. "coords" are the internal coordinates used to compute the
potential energy surface and the internal coordinates optimized,
labelled by their name as defined in the calculation.
"""
def floatList(l):
""" return a list of float from a list of string """
return [float(v) for v in l]
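        # Illustrative usage of this method (a sketch; coordinate names depend on
        # the scanned variables):
        #   scan = gout.read_scan()
        #   relative_e = [e - min(scan["energies"]) for e in scan["energies"]]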
scan_patt = re.compile(r"^\sSummary of the potential surface scan:")
optscan_patt = re.compile(r"^\sSummary of Optimized Potential Surface Scan")
# data dict return
data = {"energies": list(), "coords": dict()}
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
while line != "":
if optscan_patt.match(line):
f.readline()
line = f.readline()
endScan = False
while not endScan:
data["energies"] += floatList(float_patt.findall(line))
line = f.readline()
while not re.search(r"(^\s+(\d+)|^\s-+)", line):
icname = line.split()[0].strip()
if icname in data["coords"]:
data["coords"][icname] += floatList(float_patt.findall(line))
else:
data["coords"][icname] = floatList(float_patt.findall(line))
line = f.readline()
if re.search(r"^\s-+", line):
endScan = True
else:
line = f.readline()
elif scan_patt.match(line):
line = f.readline()
data["coords"] = {icname: list()
for icname in line.split()[1:-1]}
f.readline()
line = f.readline()
while not re.search(r"^\s-+", line):
values = floatList(line.split())
data["energies"].append(values[-1])
for i, icname in enumerate(data["coords"]):
data["coords"][icname].append(values[i+1])
line = f.readline()
else:
line = f.readline()
return data
def get_scan_plot(self, coords=None):
"""
Get a matplotlib plot of the potential energy surface.
Args:
            coords: internal coordinate name to use as abscissa.
"""
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8)
d = self.read_scan()
if coords and coords in d["coords"]:
x = d["coords"][coords]
plt.xlabel(coords)
else:
x = range(len(d["energies"]))
plt.xlabel("points")
plt.ylabel("Energy (eV)")
e_min = min(d["energies"])
y = [(e - e_min) * Ha_to_eV for e in d["energies"]]
plt.plot(x, y, "ro--")
return plt
def save_scan_plot(self, filename="scan.pdf",
img_format="pdf", coords=None):
"""
Save matplotlib plot of the potential energy surface to a file.
Args:
filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
            coords: internal coordinate name to use as abscissa.
"""
plt = self.get_scan_plot(coords)
plt.savefig(filename, format=img_format)
def read_excitation_energies(self):
"""
        Read the excitation energies after a TD-DFT calculation.
        Returns:
            A list: A list of tuples, one for each transition, such as
                    [(energy (eV), lambda (nm), oscillator strength), ... ]
"""
transitions = list()
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
td = False
while line != "":
if re.search(r"^\sExcitation energies and oscillator strengths:", line):
td = True
if td:
if re.search(r"^\sExcited State\s*\d", line):
val = [float(v) for v in float_patt.findall(line)]
transitions.append(tuple(val[0:3]))
line = f.readline()
return transitions
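    # Illustrative usage of read_excitation_energies (a sketch): pick the
    # strongest transition by oscillator strength.
    #   transitions = gout.read_excitation_energies()
    #   strongest = max(transitions, key=lambda t: t[2])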
def get_spectre_plot(self, sigma=0.05, step=0.01):
"""
        Get a matplotlib plot of the UV-visible spectrum (xas). Transitions are
        plotted as vertical lines and as a sum of normal functions of width sigma.
        The broadening is applied in energy and the spectrum is plotted as a
        function of the wavelength.
Args:
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
Returns:
A dict: {"energies": values, "lambda": values, "xas": values}
                    where values are lists of abscissa (energies, lambda) and
the sum of gaussian functions (xas).
A matplotlib plot.
"""
from pymatgen.util.plotting import pretty_plot
from scipy.stats import norm
plt = pretty_plot(12, 8)
transitions = self.read_excitation_energies()
minval = min([val[0] for val in transitions]) - 5.0 * sigma
maxval = max([val[0] for val in transitions]) + 5.0 * sigma
npts = int((maxval - minval) / step) + 1
eneval = np.linspace(minval, maxval, npts) # in eV
lambdaval = [cst.h * cst.c / (val * cst.e) * 1.e9
for val in eneval] # in nm
# sum of gaussian functions
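        # (up to normalization, I(E) = sum_i f_i * N(E; E_i, sigma), where f_i is
        # the oscillator strength and E_i the transition energy of excited state i)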
spectre = np.zeros(npts)
for trans in transitions:
            spectre += trans[2] * norm.pdf(eneval, trans[0], sigma)
spectre /= spectre.max()
plt.plot(lambdaval, spectre, "r-", label="spectre")
data = {"energies": eneval, "lambda": lambdaval, "xas": spectre}
# plot transitions as vlines
plt.vlines([val[1] for val in transitions],
0.,
[val[2] for val in transitions],
color="blue",
label="transitions",
linewidth=2)
plt.xlabel("$\\lambda$ (nm)")
plt.ylabel("Arbitrary unit")
plt.legend()
return data, plt
def save_spectre_plot(self, filename="spectre.pdf", img_format="pdf",
sigma=0.05, step=0.01):
"""
Save matplotlib plot of the spectre to a file.
Args:
filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
"""
d, plt = self.get_spectre_plot(sigma, step)
plt.savefig(filename, format=img_format)
def to_input(self, mol=None, charge=None,
spin_multiplicity=None, title=None, functional=None,
basis_set=None, route_parameters=None, input_parameters=None,
link0_parameters=None, dieze_tag=None, cart_coords=False):
"""
        Create a new input object using, by default, the last geometry read in
        the output file and the same calculation parameters. Arguments
        are the same as for the GaussianInput class.
        Returns:
            gauinp (GaussianInput) : the Gaussian input object
"""
if not mol:
mol = self.final_structure
if charge is None:
charge = self.charge
if spin_multiplicity is None:
spin_multiplicity = self.spin_multiplicity
if not title:
title = self.title
if not functional:
functional = self.functional
if not basis_set:
basis_set = self.basis_set
if not route_parameters:
route_parameters = self.route_parameters
if not link0_parameters:
link0_parameters = self.link0
if not dieze_tag:
dieze_tag = self.dieze_tag
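        # Illustrative usage (a sketch): restart a single-point run on the last
        # geometry, e.g. gout.to_input(route_parameters={"SP": None}).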
return GaussianInput(mol=mol,
charge=charge,
spin_multiplicity=spin_multiplicity,
title=title,
functional=functional,
basis_set=basis_set,
route_parameters=route_parameters,
input_parameters=input_parameters,
link0_parameters=link0_parameters,
dieze_tag=dieze_tag)
| mit |
pythonvietnam/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
cskyan/gesgnext | bin/gsx_extrc.py | 1 | 83361 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
###########################################################################
# Copyright (C) 2013-2016 by Caspar. All rights reserved.
# File Name: gsx_extrc.py
# Author: Shankai Yan
# E-mail: [email protected]
# Created Time: 2016-03-16 15:56:16
###########################################################################
#
import os
import sys
import re
import ast
import time
import bisect
import logging
import operator
import itertools
import collections
from shutil import copyfile
from optparse import OptionParser
import numpy as np
import scipy as sp
import scipy.stats as stats
import pandas as pd
from sklearn.preprocessing import label_binarize
from sklearn.feature_selection import VarianceThreshold, SelectKBest, SelectPercentile, SelectFpr, SelectFromModel, chi2, f_classif
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier, NearestCentroid
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression, RidgeClassifier, SGDClassifier, Perceptron, PassiveAggressiveClassifier, LassoCV, LassoLarsCV, LassoLarsIC, RandomizedLasso
from sklearn.svm import SVC, LinearSVC
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, BaggingClassifier, AdaBoostClassifier, GradientBoostingClassifier
# from xgboost import XGBClassifier
from sklearn.cluster import DBSCAN, AgglomerativeClustering
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.metrics.pairwise import pairwise_distances
from keras.utils.io_utils import HDF5Matrix
from bionlp.spider import geoxml as geo, annot, hgnc, dnorm, rxnav, sparql
from bionlp.model.fzcmeans import FZCMeans, CNSFZCMeans
from bionlp.model.lda import LDACluster
from bionlp.model import kerasext, kallima
from bionlp.util import fs, io, func, ontology
from bionlp import ftslct, txtclf, dstclc, txtclt, nlp
import gsc
import gsx_helper as helper
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
PAR_DIR = os.path.abspath(os.path.join(FILE_DIR, os.path.pardir))
CONFIG_FILE = os.path.join(PAR_DIR, 'etc', 'config.yaml')
SPDR_MAP = {'gsc':gsc, 'geo':geo}
KERAS_DEVID, KERAS_DEV, KERAS_DEVINIT, KERAS_DEVARS = 0, '', False, {}
SC=';;'
opts, args = {}, []
cfgr = None
spdr = geo
def load_data(type='gse', pid=-1, fmt='npz', spfmt='csr'):
print 'Loading data for %s ...' % type.upper()
try:
if (type == 'gsm-clf'):
if (pid == -1):
Xs, Ys, _ = spdr.get_data(None, type='gsm', from_file=True, fmt=fmt, spfmt=spfmt)
else:
Xs, Ys, _ = spdr.get_mltl_npz(type='gsm', lbs=[pid], spfmt=spfmt) if opts.mltl else spdr.get_mltl_npz(type='gsm', lbs=['%i_%i' % (int(pid/2), int(pid%2))], spfmt=spfmt)
return Xs, Ys
elif (type == 'gsm-clt'):
if (pid == -1):
Xs, Ys, labels = spdr.get_data(None, type='gsm', from_file=True, fmt=fmt, spfmt=spfmt)
else:
Xs, Ys, labels = spdr.get_mltl_npz(type='gsm', lbs=[pid], spfmt=spfmt)
gsm2gse = get_gsm2gse(data_path=spdr.DATA_PATH)
return Xs, Ys, labels, gsm2gse
elif (type == 'sgn'):
if (pid == -1):
Xs, Ys, labels = spdr.get_data(None, type='gsm', from_file=True, fmt=fmt, spfmt=spfmt)
else:
Xs, Ys, labels = spdr.get_mltl_npz(type='gsm', lbs=[pid], spfmt=spfmt)
gsm2gse = get_gsm2gse(data_path=spdr.DATA_PATH)
return Xs, Ys, labels, gsm2gse
if (pid == -1):
# From combined data file
X, Y = spdr.get_data(None, type=type, from_file=True, fmt=fmt, spfmt=spfmt)
else:
# From splited data file
Xs, Ys = spdr.get_mltl_npz(type=type, lbs=[pid], mltlx=False, spfmt=spfmt)
X, Y = Xs[0], Ys[0]
return X, Y
except Exception as e:
print e
print 'Can not find the data files!'
sys.exit(1)
def get_gsm2gse(data_path):
return io.read_df(os.path.join(data_path, 'gsm2gse.npz'), with_idx=True)
def build_model(mdl_func, mdl_t, mdl_name, tuned=False, pr=None, mltl=False, mltp=True, **kwargs):
if (tuned and bool(pr)==False):
print 'Have not provided parameter writer!'
return None
if (mltl):
return OneVsRestClassifier(mdl_func(**func.update_dict(pr(mdl_t, mdl_name) if tuned else {}, kwargs)), n_jobs=opts.np) if (mltp) else OneVsRestClassifier(mdl_func(**func.update_dict(pr(mdl_t, mdl_name) if tuned else {}, kwargs)))
else:
return mdl_func(**func.update_dict(pr(mdl_t, mdl_name) if tuned else {}, kwargs))
# Keras Deep Learning
def gen_keras(input_dim, output_dim, model='simple', **kwargs):
from bionlp.model import cfkmeans
mdl_map = {'simple':(simple_nn, 'clf'), 'tunable':(tunable_nn, 'clf'), 'cfkmeans':(cfkmeans.cfkmeans_mdl, 'clt')}
mdl = mdl_map[model]
return kerasext.gen_mdl(input_dim, output_dim, mdl[0], mdl[1], backend=opts.dend, verbose=opts.verbose, **kwargs)
# Constraint Fuzzy K-means Neural Network
# def _cfkmeans_loss(Y_true, Y):
# import keras.backend as K
# return K.mean(Y)
# def cfkmeans_nn(input_dim=1, output_dim=1, constraint_dim=0, batch_size=32, backend='th', device='', session=None, internal_dim=64, metric='euclidean', gamma=0.01, **kwargs):
# from keras.layers import Input, Lambda, merge
# from keras.optimizers import SGD
# from bionlp.model.cfkmeans import CFKU, CFKD, CFKC
# import keras.backend as K
# with kerasext.gen_cntxt(backend, device):
# X_input = Input(shape=(input_dim,), dtype=K.floatx(), name='X')
# C_input = Input(shape=(constraint_dim,), name='CI')
# cfku = CFKU(output_dim=output_dim, input_dim=input_dim, batch_size=batch_size, name='U', session=session)(X_input)
# cfkd = CFKD(output_dim=output_dim, input_dim=input_dim, metric=metric, batch_size=batch_size, name='D', session=session)([X_input, cfku])
# loss = merge([cfku, cfkd], mode='mul', name='L')
# rglz = Lambda(lambda x: gamma * K.tanh(x), name='R')(cfku)
# constr = CFKC(output_dim=output_dim, input_dim=input_dim, batch_size=batch_size, name='C', session=session)([C_input, cfku, cfkd])
# J = merge([loss, rglz, constr], mode='sum', name='J')
# model = kerasext.gen_cltmdl(context=dict(backend=backend, device=device), session=session, input=[X_input, C_input], output=[J], constraint_dim=constraint_dim)
# optmzr = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss=_cfkmeans_loss, optimizer=optmzr, metrics=['accuracy', 'mse'])
# return model
# Tunable Deep Learning Model
def tunable_nn(input_dim=1, output_dim=1, backend='th', device='', session=None, internal_dim=64, layer_num=3, init='uniform', activation='tanh', dropout_ratio=0.5):
from keras.layers import Dense, Dropout
from keras.optimizers import SGD
with kerasext.gen_cntxt(backend, device):
model = kerasext.gen_mlseq(context=dict(backend=backend, device=device), session=session)
model.add(Dense(output_dim=internal_dim, input_dim=input_dim, init=init, activation=activation))
model.add(Dropout(dropout_ratio))
for i in xrange(layer_num):
model.add(Dense(output_dim=internal_dim, init=init, activation=activation))
model.add(Dropout(dropout_ratio))
model.add(Dense(output_dim=output_dim, init=init, activation='sigmoid'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy', 'mae'])
return model
# Simple Deep Learning Model
def simple_nn(input_dim=1, output_dim=1, backend='th', device='', session=None, internal_dim=64):
from keras.layers import Dense, Dropout
from keras.optimizers import SGD
with kerasext.gen_cntxt(backend, device):
model = kerasext.gen_mlseq(context=dict(backend=backend, device=device), session=session)
model.add(Dense(output_dim=internal_dim, input_dim=input_dim, init='uniform', activation='tanh'))
model.add(Dropout(0.5))
model.add(Dense(output_dim=internal_dim, init='uniform', activation='tanh'))
model.add(Dropout(0.5))
model.add(Dense(output_dim=output_dim, init='uniform', activation='sigmoid'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy', 'mae'])
return model
# Neural Network Classification Models
def gen_nnclfs(input_dim, output_dim, other_clfs=None, **kwargs):
def nnclfs(tuned=False, glb_filtnames=[], glb_clfnames=[]):
tuned = tuned or opts.best
common_cfg = cfgr('gsx_extrc', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
clf_names = []
for clf_name, clf in [
('3L64U Neural Network', gen_keras(input_dim, output_dim, model='simple', internal_dim=64)),
('3L96U Neural Network', gen_keras(input_dim, output_dim, model='simple', internal_dim=96))
]:
yield clf_name, clf
clf_names.append(clf_name)
if (other_clfs is not None):
for clf_name, clf in other_clfs(tuned, glb_filtnames, glb_clfnames):
yield clf_name, clf
clf_names.append(clf_name)
if (len(glb_clfnames) < len(clf_names)):
del glb_clfnames[:]
glb_clfnames.extend(clf_names)
return nnclfs
# Feature Filtering Models
def gen_featfilt(tuned=False, glb_filtnames=[], **kwargs):
tuned = tuned or opts.best
common_cfg = cfgr('gsx_extrc', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
filt_names = []
for filt_name, filter in [
# ('Var Cut', VarianceThreshold()),
# ('Chi2 Pval on FPR', SelectFpr(chi2, alpha=0.05)),
# ('ANOVA-F Pval on FPR', SelectFpr(f_classif, alpha=0.05)),
# ('Chi2 Top K Perc', SelectPercentile(chi2, percentile=30)),
# ('ANOVA-F Top K Perc', SelectPercentile(f_classif, percentile=30)),
# ('Chi2 Top K', SelectKBest(chi2, k=1000)),
# ('ANOVA-F Top K', SelectKBest(f_classif, k=1000)),
# ('LinearSVC', LinearSVC(loss='squared_hinge', dual=False, **pr('Classifier', 'LinearSVC') if tuned else {})),
# ('Logistic Regression', SelectFromModel(LogisticRegression(dual=False, **pr('Feature Selection', 'Logistic Regression') if tuned else {}))),
# ('Lasso', SelectFromModel(LassoCV(cv=6), threshold=0.16)),
# ('Lasso-LARS', SelectFromModel(LassoLarsCV(cv=6))),
# ('Lasso-LARS-IC', SelectFromModel(LassoLarsIC(criterion='aic'), threshold=0.16)),
# ('Randomized Lasso', SelectFromModel(RandomizedLasso(random_state=0))),
# ('Extra Trees Regressor', SelectFromModel(ExtraTreesRegressor(100))),
# ('U102-GSS502', ftslct.MSelectKBest(ftslct.gen_ftslct_func(ftslct.utopk, filtfunc=ftslct.gss_coef, fn=100), k=500)),
# ('GSS502', ftslct.MSelectKBest(ftslct.gss_coef, k=500)),
# ('Combined Model', FeatureUnion([('Var Cut', VarianceThreshold()), ('Chi2 Top K', SelectKBest(chi2, k=1000))])),
('No Feature Filtering', None)
]:
yield filt_name, filter
filt_names.append(filt_name)
if (len(glb_filtnames) < len(filt_names)):
del glb_filtnames[:]
glb_filtnames.extend(filt_names)
# Classification Models
def gen_clfs(tuned=False, glb_clfnames=[], **kwargs):
tuned = tuned or opts.best
common_cfg = cfgr('gsx_extrc', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
clf_names = []
for clf_name, clf in [
# ('RidgeClassifier', RidgeClassifier(tol=1e-2, solver='lsqr')),
# ('Perceptron', build_model(Perceptron, 'Classifier', 'Perceptron', tuned=tuned, pr=pr, mltl=opts.mltl, n_jobs=1 if opts.mltl else opts.np)),
# ('Passive-Aggressive', PassiveAggressiveClassifier(n_iter=50, n_jobs=1 if opts.mltl else opts.np)),
# ('kNN', KNeighborsClassifier(n_neighbors=100, n_jobs=1 if opts.mltl else opts.np)),
# ('NearestCentroid', NearestCentroid()),
# ('BernoulliNB', BernoulliNB()),
# ('MultinomialNB', MultinomialNB()),
('RandomForest', build_model(RandomForestClassifier, 'Classifier', 'Random Forest', tuned=tuned, pr=pr, mltl=opts.mltl, n_jobs=1 if opts.mltl else opts.np, random_state=0)),
('ExtraTrees', build_model(ExtraTreesClassifier, 'Classifier', 'Extra Trees', tuned=tuned, pr=pr, mltl=opts.mltl, n_jobs=opts.np)),
# ('RandomForest', Pipeline([('clf', build_model(RandomForestClassifier, 'Classifier', 'Random Forest', tuned=tuned, pr=pr, n_jobs=opts.np, random_state=0))])),
# ('BaggingkNN', BaggingClassifier(KNeighborsClassifier(), max_samples=0.5, max_features=0.5, n_jobs=1 if opts.mltl else opts.np, random_state=0)),
# ('BaggingLinearSVC', build_model(BaggingClassifier, 'Classifier', 'Bagging LinearSVC', tuned=tuned, pr=pr, mltl=opts.mltl, base_estimator=build_model(LinearSVC, 'Classifier', 'LinearSVC', tuned=tuned, pr=pr, mltl=opts.mltl, loss='squared_hinge', dual=False), max_samples=0.5, max_features=0.5, n_jobs=1 if opts.mltl else opts.np, random_state=0)),
# ('LinSVM', build_model(LinearSVC, 'Classifier', 'LinearSVC', tuned=tuned, pr=pr, mltl=opts.mltl, loss='squared_hinge', dual=False)),
('AdaBoost', build_model(AdaBoostClassifier, 'Classifier', 'AdaBoost', tuned=tuned, pr=pr, mltl=opts.mltl)),
('GradientBoosting', build_model(GradientBoostingClassifier, 'Classifier', 'GBoost', tuned=tuned, pr=pr, mltl=opts.mltl)),
# ('XGBoost', build_model(XGBClassifier, 'Classifier', 'XGBoost', tuned=tuned, pr=pr, mltl=opts.mltl, mltp=False, n_jobs=opts.np)),
# ('XGBoost', build_model(XGBClassifier, 'Classifier', 'XGBoost', tuned=tuned, pr=pr, mltl=opts.mltl, mltp=False, nthread=opts.np)),
('RbfSVM', build_model(SVC, 'Classifier', 'RBF SVM', tuned=tuned, pr=pr, mltl=opts.mltl))
]:
yield clf_name, clf
clf_names.append(clf_name)
if (len(glb_clfnames) < len(clf_names)):
del glb_clfnames[:]
glb_clfnames.extend(clf_names)
# Benchmark Neural Network Models
def gen_bmnn_models(input_dim, output_dim, other_clfs=None, **kwargs):
def bmnn_models(tuned=False, glb_filtnames=[], glb_clfnames=[]):
# Feature Filtering Model
for filt_name, filter in gen_featfilt(tuned, glb_filtnames):
# Classification Model
clf_iter = gen_nnclfs(input_dim, output_dim, other_clfs)
for clf_name, clf in clf_iter(tuned, glb_clfnames):
yield filt_name, filter, clf_name, clf
del clf
del filter
return bmnn_models
# Benchmark Models
def gen_bm_models(tuned=False, glb_filtnames=[], glb_clfnames=[], **kwargs):
# Feature Filtering Model
for filt_name, filter in gen_featfilt(tuned, glb_filtnames):
# Classification Model
for clf_name, clf in gen_clfs(tuned, glb_clfnames):
yield filt_name, filter, clf_name, clf
del clf
del filter
# Combined Models
def gen_cb_models(tuned=False, glb_filtnames=[], glb_clfnames=[], **kwargs):
tuned = tuned or opts.best
common_cfg = cfgr('gsx_extrc', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
# filtref_func = ftslct.filtref(os.path.join(spdr.DATA_PATH, 'X.npz'), os.path.join(spdr.DATA_PATH, 'union_filt_X.npz'))
for mdl_name, mdl in [
('UDT-RF', Pipeline([('clf', build_model(RandomForestClassifier, 'Classifier', 'Random Forest', tuned=tuned, pr=pr, mltl=opts.mltl, n_jobs=1 if opts.mltl else opts.np, random_state=0))])),
('UDT-ET', Pipeline([('clf', build_model(ExtraTreesClassifier, 'Classifier', 'Extra Trees', tuned=tuned, pr=pr, mltl=opts.mltl, n_jobs=opts.np))])),
('UDT-AB', Pipeline([('clf', build_model(AdaBoostClassifier, 'Classifier', 'AdaBoost', tuned=tuned, pr=pr, mltl=opts.mltl))])),
('UDT-GB', Pipeline([('clf', build_model(GradientBoostingClassifier, 'Classifier', 'GBoost', tuned=tuned, pr=pr, mltl=opts.mltl))])),
('UDT-RbfSVM', Pipeline([('clf', build_model(SVC, 'Classifier', 'RBF SVM', tuned=tuned, pr=pr, mltl=opts.mltl, probability=True))])),
# ('RandomForest', Pipeline([('clf', build_model(RandomForestClassifier, 'Classifier', 'Random Forest', tuned=tuned, pr=pr, mltl=opts.mltl, n_jobs=1 if opts.mltl else opts.np, random_state=0))])),
# ('UDT-RF', Pipeline([('featfilt', ftslct.MSelectKBest(ftslct.utopk, filtfunc=ftslct.decision_tree, k=200, fn=100)), ('clf', build_model(RandomForestClassifier, 'Classifier', 'Random Forest', tuned=tuned, pr=pr, mltl=opts.mltl, n_jobs=1 if opts.mltl else opts.np, random_state=0))])),
# ('UDT-RF', Pipeline([('featfilt', ftslct.MSelectOverValue(ftslct.filtref(os.path.join(spdr.DATA_PATH, 'gsm_X_0.npz'), os.path.join(spdr.DATA_PATH, 'udt200', 'gsm_X_0.npz')))), ('clf', build_model(RandomForestClassifier, 'Classifier', 'Random Forest', tuned=tuned, pr=pr, mltl=opts.mltl, n_jobs=1 if opts.mltl else opts.np, random_state=0))])),
# ('RandomForest', Pipeline([('featfilt', SelectFromModel(DecisionTreeClassifier(criterion='entropy', class_weight='balanced', random_state=0))), ('clf', build_model(RandomForestClassifier, 'Classifier', 'Random Forest', tuned=tuned, pr=pr, mltl=opts.mltl, n_jobs=1 if opts.mltl else opts.np, random_state=0))])),
# ('DF-RbfSVM', Pipeline([('featfilt', ftslct.MSelectOverValue(ftslct.filtref(os.path.join(spdr.DATA_PATH, 'X.npz'), os.path.join(spdr.DATA_PATH, 'union_filt_X.npz'), os.path.join(spdr.DATA_PATH, 'orig_X.npz')))), ('clf', build_model(SVC, 'Classifier', 'RBF SVM', tuned=tuned, pr=pr, mltl=opts.mltl, probability=True))])),
# ('L1-LinSVC', Pipeline([('clf', build_model(LinearSVC, 'Classifier', 'LinearSVC', tuned=tuned, pr=pr, mltl=opts.mltl, loss='squared_hinge', dual=False))])),
# ('Perceptron', Pipeline([('clf', build_model(Perceptron, 'Classifier', 'Perceptron', tuned=tuned, pr=pr, mltl=opts.mltl, n_jobs=1 if opts.mltl else opts.np))])),
# ('MNB', Pipeline([('clf', build_model(MultinomialNB, 'Classifier', 'MultinomialNB', tuned=tuned, pr=pr, mltl=opts.mltl))])),
# ('5NN', Pipeline([('clf', build_model(KNeighborsClassifier, 'Classifier', 'kNN', tuned=tuned, pr=pr, mltl=opts.mltl, n_neighbors=5, n_jobs=1 if opts.mltl else opts.np))])),
# ('MEM', Pipeline([('clf', build_model(LogisticRegression, 'Classifier', 'Logistic Regression', tuned=tuned, pr=pr, mltl=opts.mltl, dual=False))])),
# ('LinearSVC with L2 penalty [Ft Filt] & Perceptron [CLF]', Pipeline([('featfilt', SelectFromModel(build_model(LinearSVC, 'Feature Selection', 'LinearSVC', tuned=tuned, pr=pr, mltl=opts.mltl, loss='squared_hinge', dual=False, penalty='l2'))), ('clf', build_model(Perceptron, 'Classifier', 'Perceptron', tuned=tuned, pr=pr, n_jobs=opts.np))])),
# ('ExtraTrees', Pipeline([('clf', build_model(ExtraTreesClassifier, 'Classifier', 'Extra Trees', tuned=tuned, pr=pr, mltl=opts.mltl, n_jobs=opts.np))])),
# ('Random Forest', Pipeline([('clf', build_model(RandomForestClassifier, 'Classifier', 'Random Forest', tuned=tuned, pr=pr, n_jobs=opts.np, random_state=0))])),
# ('AdaBoost', Pipeline([('clf', build_model(AdaBoostClassifier, 'Classifier', 'AdaBoost', tuned=tuned, pr=pr, mltl=opts.mltl))])),
# ('GradientBoosting', Pipeline([('clf', build_model(GradientBoostingClassifier, 'Classifier', 'GBoost', tuned=tuned, pr=pr, mltl=opts.mltl))])),
# ('XGBoost', Pipeline([('clf', build_model(XGBClassifier, 'Classifier', 'XGBoost', tuned=tuned, pr=pr, mltl=opts.mltl, mltp=False, n_jobs=opts.np))])),
# ('XGBoost', Pipeline([('clf', build_model(XGBClassifier, 'Classifier', 'XGBoost', tuned=tuned, pr=pr, mltl=opts.mltl, mltp=False, nthread=opts.np))])),
# ('RbfSVM', Pipeline([('clf', build_model(SVC, 'Classifier', 'RBF SVM', tuned=tuned, pr=pr, mltl=opts.mltl, probability=True))]))
]:
yield mdl_name, mdl
# Neural Network Clustering model
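# Returns a generator factory that yields Keras-based clustering models (the 'cfkmeans'
# architecture built via gen_keras) at two internal widths, optionally followed by any
# extra clusterers supplied through `other_clts`; the discovered model names are mirrored
# into `glb_cltnames` for downstream bookkeeping.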
def gen_nnclt_models(input_dim, output_dim, constraint_dim=0, batch_size=32, other_clts=None, **kwargs):
def nnclt(tuned=False, glb_filtnames=[], glb_cltnames=[], **kwargs):
tuned = tuned or opts.best
common_cfg = cfgr('gsx_extrc', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
clt_names = []
for clt_name, clt in [
('3L64U Neural Network', gen_keras(input_dim, output_dim, model='cfkmeans', constraint_dim=constraint_dim, batch_size=batch_size, internal_dim=64, metric='manhattan', gamma=0.01, **kwargs)),
('3L96U Neural Network', gen_keras(input_dim, output_dim, model='cfkmeans', constraint_dim=constraint_dim, batch_size=batch_size, internal_dim=96, metric='manhattan', gamma=0.01, **kwargs))
]:
yield clt_name, clt
clt_names.append(clt_name)
if (other_clts is not None):
			for clt_name, clt in other_clts(tuned, glb_filtnames, glb_cltnames):	# use glb_cltnames; glb_clfnames is not defined in this scope
yield clt_name, clt
clt_names.append(clt_name)
if (len(glb_cltnames) < len(clt_names)):
del glb_cltnames[:]
glb_cltnames.extend(clt_names)
return nnclt
# Clustering model
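# Yields conventional clustering estimators (currently fuzzy c-means and an LDA-based
# clusterer; the distance-based DBSCAN variants are kept commented out) and records
# their names in `glb_cltnames`.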
def gen_clt_models(tuned=False, glb_filtnames=[], glb_cltnames=[], **kwargs):
tuned = tuned or opts.best
common_cfg = cfgr('gsx_extrc', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
clt_names = []
for clt_name, clt in [
# ('SJI-AGGLM', Pipeline([('distcalc', dstclc.gen_dstclc(dstclc.sji)), ('clt', AgglomerativeClustering(metric='precomputed'))])),
# ('Manh-DBSCAN', Pipeline([('distcalc', dstclc.gen_dstclc(pairwise_distances, kw_args={'metric':'manhattan', 'n_jobs':opts.np})), ('clt', DBSCAN(min_samples=2, metric='precomputed', n_jobs=opts.np))])),
# ('Manh-DBSCAN', DBSCAN(min_samples=2, metric='manhattan', algorithm='ball_tree', n_jobs=opts.np)),
('FuzzyCmeans', FZCMeans(n_clusters=100, random_state=0)),
('LDA', LDACluster(n_clusters=100, learning_method='online', learning_offset=50., max_iter=5, n_jobs=opts.np, random_state=0)),
]:
yield clt_name, clt
clt_names.append(clt_name)
if (len(glb_cltnames) < len(clt_names)):
del glb_cltnames[:]
glb_cltnames.extend(clt_names)
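# Clustering models used in the combined setting (opts.comb): the active choices are a
# dummy cluster reader for cached Kallima predictions, Manhattan DBSCAN, and fuzzy
# c-means, while the constrained-distance and HDBSCAN variants remain commented out.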
def gen_cbclt_models(tuned=False, glb_filtnames=[], glb_clfnames=[], **kwargs):
try:
import hdbscan
except Exception as e:
print e
tuned = tuned or opts.best
common_cfg = cfgr('gsx_extrc', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
for mdl_name, mdl in [
# ('CNZ-DBSCAN', Pipeline([('distcalc', dstclc.gen_dstclc(dstclc.cns_dist, kw_args={'metric':'euclidean', 'C':kwargs.setdefault('constraint', None), 'a':0.4, 'n_jobs':opts.np})), ('clt', DBSCAN(metric='precomputed', n_jobs=opts.np))])),
# ('CNZ-HDBSCAN', Pipeline([('distcalc', dstclc.gen_dstclc(dstclc.cns_dist, kw_args={'metric':'euclidean', 'C':kwargs.setdefault('constraint', None), 'a':0.4, 'n_jobs':opts.np})), ('clt', hdbscan.HDBSCAN(min_cluster_size=2, metric='precomputed', n_jobs=opts.np))])),
# ('ManhCNZ-DBSCAN', Pipeline([('distcalc', dstclc.gen_dstclc(dstclc.cns_dist, kw_args={'metric':'manhattan', 'C':kwargs.setdefault('constraint', None), 'a':0.4, 'n_jobs':opts.np})), ('clt', DBSCAN(metric='precomputed', n_jobs=opts.np))])),
# ('ManhCNZ-HDBSCAN', Pipeline([('distcalc', dstclc.gen_dstclc(dstclc.cns_dist, kw_args={'metric':'manhattan', 'C':kwargs.setdefault('constraint', None), 'a':0.4, 'n_jobs':opts.np})), ('clt', hdbscan.HDBSCAN(min_cluster_size=2, metric='precomputed', n_jobs=opts.np))])),
# ('Kallima', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.5), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.5), rcexp=1, cond=kwargs.setdefault('cond', 0.3), cross_merge=False, merge_all=False, save_g=True, distmt=kwargs.setdefault('distmt', None), n_jobs=opts.np))])),
('Kallima', Pipeline([('clt', txtclt.DummyCluster(output='clt_pred_kallima_%i.npz' % opts.pid))])),
# ('Kallima-a-0_4', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.4), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.4), rcexp=1, cond=kwargs.setdefault('cond', 0.3), cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])),
# ('Kallima-a-0_3', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.3), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.4), rcexp=1, cond=kwargs.setdefault('cond', 0.3), cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])),
# ('Kallima-a-0_6', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.6), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.4), rcexp=1, cond=kwargs.setdefault('cond', 0.3), cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])),
# ('Kallima-a-0_7', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.7), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.4), rcexp=1, cond=kwargs.setdefault('cond', 0.3), cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])),
# ('Kallima-th-0_3', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.5), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.3), rcexp=1, cond=kwargs.setdefault('cond', 0.3), cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])),
# ('Kallima-th-0_2', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.5), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.2), rcexp=1, cond=kwargs.setdefault('cond', 0.3), cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])),
# ('Kallima-th-0_5', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.5), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.5), rcexp=1, cond=kwargs.setdefault('cond', 0.3), cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])),
# ('Kallima-th-0_6', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.5), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.6), rcexp=1, cond=kwargs.setdefault('cond', 0.3), cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])),
# ('Kallima-ph-0_2', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.5), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.4), rcexp=1, cond=kwargs.setdefault('cond', 0.2), cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])),
# ('Kallima-ph-0_1', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.5), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.4), rcexp=1, cond=kwargs.setdefault('cond', 0.1), cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])),
# ('Kallima-ph-0_4', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.5), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.4), rcexp=1, cond=kwargs.setdefault('cond', 0.4), cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])),
# ('Kallima-ph-0_5', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=kwargs.setdefault('cns_ratio', 0.5), nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=kwargs.setdefault('coarse', 0.4), rcexp=1, cond=kwargs.setdefault('cond', 0.5), cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])),
('Manh-DBSCAN', Pipeline([('clt', DBSCAN(metric='manhattan', algorithm='ball_tree', n_jobs=opts.np))])),
('FuzzyCmeans', Pipeline([('clt', FZCMeans(n_clusters=1500, random_state=0))])),
# ('CNSFuzzyCmeans', Pipeline([('clt', CNSFZCMeans(n_clusters=1500, a=0.4, random_state=0, n_jobs=opts.np))])),
# ('LDA', Pipeline([('clt', LDACluster(n_clusters=1500, learning_method='online', learning_offset=50., max_iter=5, n_jobs=opts.np, random_state=0))])),
]:
yield mdl_name, mdl
# DNN Models with parameter range
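# Yields (name, model, parameter-space) triples for the tunable Keras model; when
# `rdtune` is set an iteration budget is attached for randomized search.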
def gen_nnmdl_params(input_dim, output_dim, rdtune=False):
common_cfg = cfgr('gsx_extrc', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
if (rdtune):
for mdl_name, mdl, params in [
('Neural Network', gen_keras(input_dim, output_dim, model='tunable'), {
'param_space':dict(
internal_dim=np.logspace(6, 9, num=4, base=2, dtype='int'),
layer_num=np.logspace(2, 6, num=5, base=2, dtype='int'),
dropout_ratio=np.logspace(-0.301, 0, num=10).tolist(),
init=['uniform', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform'],
activation=['tanh', 'sigmoid', 'hard_sigmoid', 'relu', 'linear', 'softplus', 'softsign']),
'n_iter':32
})
]:
yield mdl_name, mdl, params
else:
for mdl_name, mdl, params in [
('Neural Network', gen_keras(input_dim, output_dim, model='tunable'), {
'param_space':dict(
internal_dim=np.logspace(6, 9, num=4, base=2, dtype='int'),
layer_num=np.logspace(2, 6, num=5, base=2, dtype='int'),
dropout_ratio=np.logspace(-0.301, 0, num=10).tolist(),
init=['uniform', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform'],
activation=['tanh', 'sigmoid', 'hard_sigmoid', 'relu', 'linear', 'softplus', 'softsign'])
})
]:
yield mdl_name, mdl, params
# Models with parameter range
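# Yields (name, model, parameter-space) triples for classical classifiers; only the
# structured Random Forest space is active, the remaining candidates are commented out.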
def gen_mdl_params():
common_cfg = cfgr('gsx_extrc', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
for mdl_name, mdl, params in [
('Random Forest', RandomForestClassifier(random_state=0), {
'struct': True,
'param_names': ['n_estimators', 'max_features', 'max_depth', 'min_samples_leaf', 'class_weight'],
'param_space': {'class_weight':dict(zip(['balanced', 'None'],
[func.update_dict(x, y) for x, y in
zip([dict(), dict()],
[dict(
n_estimators=[50, 1000],
max_features=[0.5, 1],
max_depth=[1, 100],
min_samples_leaf=[1, 100]
)] * 2
)]))},
'n_iter':32
}),
# ('Random Forest', RandomForestClassifier(random_state=0), {
# 'param_space':dict(
# n_estimators=[50, 1000],
# max_features=[0.5, 1],
# max_depth=[1, 100],
# min_samples_leaf=[1, 100]),
# 'n_iter':32
# }),
# ('Extra Trees', ExtraTreesClassifier(random_state=0), {
# 'struct': True,
# 'param_names': ['n_estimators', 'max_features', 'max_depth', 'min_samples_leaf', 'class_weight'],
# 'param_space': {'class_weight':dict(zip(['balanced', 'None'],
# [func.update_dict(x, y) for x, y in
# zip([dict(), dict()],
# [dict(
# n_estimators=[50, 1000],
# max_features=[0.5, 1],
# max_depth=[1, 100],
# min_samples_leaf=[1, 100]
# )] * 2
# )]))},
# 'n_iter':32
# }),
# ('Extra Trees', ExtraTreesClassifier(random_state=0), {
# 'param_space':dict(
# n_estimators=[50, 1000],
# max_features=[0.5, 1],
# max_depth=[1, 100],
# min_samples_leaf=[1, 100]),
# 'n_iter':32
# }),
# ('GBoostOVR', OneVsRestClassifier(GradientBoostingClassifier(random_state=0)), {
# 'struct': True,
# 'param_names': ['estimator__n_estimators', 'estimator__subsample', 'estimator__max_features', 'estimator__max_depth', 'estimator__min_samples_leaf', 'estimator__learning_rate', 'estimator__loss'],
# 'param_space': {'loss':dict(zip(['deviance', 'exponential'],
# [func.update_dict(x, y) for x, y in
# zip([dict(), dict()],
# [dict(
# estimator__n_estimators=[20, 600],
# estimator__subsample=[0.5, 1],
# estimator__max_features=[0.5, 1],
# estimator__max_depth=[1, 100],
# estimator__min_samples_leaf=[1, 100],
# estimator__learning_rate=[0.5, 1]
# )] * 2
# )]))},
# 'n_iter':32
# }),
# ('GBoostOVR', OneVsRestClassifier(GradientBoostingClassifier(random_state=0)), {
# 'param_space':dict(
# estimator__n_estimators=[20, 600],
# estimator__subsample=[0.5, 1],
# estimator__max_features=[0.5, 1],
# estimator__max_depth=[1, 100],
# estimator__min_samples_leaf=[1, 100],
# estimator__learning_rate=[0.5, 1]),
# 'n_iter':32
# }),
# ('Logistic Regression', LogisticRegression(dual=False), {
# 'param_space':dict(
# penalty=['l1', 'l2'],
# C=np.logspace(-5, 5, 11),
# tol=np.logspace(-6, 3, 10)),
# 'n_iter':32
# }),
# ('LinearSVC', LinearSVC(dual=False), {
# 'param_space':dict(
# penalty=['l1', 'l2'],
# C=np.logspace(-5, 5, 11),
# tol=np.logspace(-6, 3, 10)),
# 'n_iter':32
# }),
# ('Perceptron', Perceptron(), {
# 'param_space':dict(
# alpha=np.logspace(-6, 3, 10),
# n_iter=stats.randint(3, 20)),
# 'n_iter':32
# }),
# ('MultinomialNB', MultinomialNB(), {
# 'param_space':dict(
# alpha=np.logspace(-6, 3, 10),
# fit_prior=[True, False]),
# 'n_iter':32
# }),
# ('SVM', SVC(), {
# 'param_space':dict(
# kernel=['linear', 'rbf', 'poly'],
# C=np.logspace(-5, 5, 11),
# gamma=np.logspace(-6, 3, 10)),
# 'n_iter':32
# }),
# ('Extra Trees', ExtraTreesClassifier(random_state=0), {
# 'param_space':dict(
# n_estimators=[50, 100] + range(200, 1001, 200),
# max_features=np.linspace(0.5, 1, 6).tolist()+['sqrt', 'log2'],
# min_samples_leaf=[1]+range(10, 101, 10),
# class_weight=['balanced', None]),
# 'n_iter':32
# }),
# ('Bagging MNB', BaggingClassifier(base_estimator=MultinomialNB(), random_state=0), {
# 'param_space':dict(
# n_estimators=[20, 50, 100] + range(200, 601, 200),
# max_samples=np.linspace(0.5, 1, 6),
# max_features=np.linspace(0.5, 1, 6),
# bootstrap=[True, False],
# bootstrap_features=[True, False]),
# 'n_iter':32
# }),
# ('GBoost', GradientBoostingClassifier(random_state=0), {
# 'param_space':dict(
# n_estimators=[20, 50, 100] + range(200, 601, 200),
# subsample = np.linspace(0.5, 1, 6),
# max_features=np.linspace(0.5, 1, 6).tolist()+['sqrt', 'log2'],
# min_samples_leaf=[1]+range(10, 101, 10),
# learning_rate=np.linspace(0.5, 1, 6),
# loss=['deviance', 'exponential']),
# 'n_iter':32
# }),
# ('XGBoostOVR', OneVsRestClassifier(XGBClassifier(random_state=0)), {
# ('XGBoostOVR', OneVsRestClassifier(XGBClassifier(seed=0)), {
# 'param_space':dict(
# estimator__n_estimators=[20, 50, 100] + range(200, 601, 200),
# estimator__subsample = np.linspace(0.5, 1, 6),
# estimator__max_depth=[3, 5, 7] + range(10,101,10),
# estimator__learning_rate=np.linspace(0.5, 1, 6)),
# 'n_iter':32
# }),
# ('UGSS & RF', Pipeline([('featfilt', ftslct.MSelectKBest(ftslct.utopk, filtfunc=ftslct.gss_coef, fn=4000)), ('clf', RandomForestClassifier())]), {
# 'param_space':dict(
# featfilt__k=np.logspace(np.log2(250), np.log2(32000), 8, base=2).astype('int')),
# 'n_iter':32
# })
]:
yield mdl_name, mdl, params
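# Run every pipeline stage in sequence: GSE classification, GSM classification,
# GSM clustering, and signature generation.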
def all_entry():
gse_clf()
gsm_clf()
gsm_clt()
gen_sgn()
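# Cross-validate the series-level (GSE) classifiers on the loaded GSE data, switching to
# the neural-network model generator when a deep learning backend is selected via --dend.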
def gse_clf():
global cfgr
if (opts.mltl):
pid = -1
else:
pid = opts.pid
print 'Process ID: %s' % pid
## Load data for GSE
gse_X, gse_Y = load_data(type='gse', pid=pid, fmt=opts.fmt, spfmt=opts.spfmt)
if (opts.mltl):
gse_Y = gse_Y.as_matrix()
if (len(gse_Y.shape) == 1 or gse_Y.shape[1] == 1):
gse_Y = gse_Y.reshape((gse_Y.shape[0],))
else:
gse_Y = gse_Y.as_matrix().reshape((gse_Y.shape[0],)) if (len(gse_Y.shape) == 2 and gse_Y.shape[1] == 1) else gse_Y.as_matrix()
## Cross validation for GSE
print 'Cross validation for GSE'
print 'Dataset size: X:%s, labels:%s' % (str(gse_X.shape), str(gse_Y.shape))
gse_filt_names, gse_clf_names, gse_pl_names = [[] for i in range(3)]
gse_pl_set = set([])
gse_model_iter = gen_cb_models if opts.comb else gen_bm_models
if (opts.dend is not None):
# gse_model_iter = gen_cb_models if opts.comb else gen_bmnn_models(gse_X.shape[1], gse_Y.shape[1] if len(gse_Y.shape) == 2 else 1, gse_model_iter)
gse_model_iter = gen_cb_models if opts.comb else gen_bmnn_models(gse_X.shape[1], gse_Y.shape[1] if len(gse_Y.shape) == 2 else 1, None)
model_param = dict(tuned=opts.best, glb_filtnames=gse_filt_names, glb_clfnames=gse_clf_names)
global_param = dict(comb=opts.comb, pl_names=gse_pl_names, pl_set=gse_pl_set)
txtclf.cross_validate(gse_X, gse_Y, gse_model_iter, model_param, avg=opts.avg, kfold=opts.kfold, cfg_param=cfgr('bionlp.txtclf', 'cross_validate'), global_param=global_param, lbid=pid)
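# Cross-validate the sample-level (GSM) classifiers, one sub-working directory per
# label set (or per process ID).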
def gsm_clf():
global cfgr
pid = opts.pid
print 'Process ID: %s' % pid
## Load data for GSM
gsm_Xs, gsm_Ys = load_data(type='gsm-clf', pid=pid, fmt=opts.fmt, spfmt=opts.spfmt)
gsm_Ys = [Y.as_matrix() if len(Y.shape) > 1 and Y.shape[1] > 1 else Y.as_matrix().reshape((Y.shape[0],)) for Y in gsm_Ys]
## Cross validation for GSM
print 'Cross validation for GSM'
orig_wd = os.getcwd()
for i, (X, Y) in enumerate(zip(gsm_Xs, gsm_Ys)):
print 'Dataset size: X:%s, labels:%s' % (str(X.shape), str(Y.shape))
# Switch to sub-working directory
new_wd = os.path.join(orig_wd, str(i) if pid == -1 else str(pid))
fs.mkdir(new_wd)
os.chdir(new_wd)
# Cross validation
gsm_filt_names, gsm_clf_names, gsm_pl_names = [[] for i in range(3)]
gsm_pl_set = set([])
gsm_model_iter = gen_cb_models if opts.comb else gen_bm_models
if (opts.dend is not None):
# gsm_model_iter = gen_cb_models if opts.comb else gen_bmnn_models(X.shape[1], Y.shape[1] if len(Y.shape) == 2 else 1, gsm_model_iter)
gsm_model_iter = gen_cb_models if opts.comb else gen_bmnn_models(X.shape[1], Y.shape[1] if len(Y.shape) == 2 else 1, None)
model_param = dict(tuned=opts.best, glb_filtnames=gsm_filt_names, glb_clfnames=gsm_clf_names)
global_param = dict(comb=opts.comb, pl_names=gsm_pl_names, pl_set=gsm_pl_set)
txtclf.cross_validate(X, Y, gsm_model_iter, model_param, avg=opts.avg, kfold=opts.kfold, cfg_param=cfgr('bionlp.txtclf', 'cross_validate'), global_param=global_param, lbid=pid)
# Switch back to the main working directory
os.chdir(orig_wd)
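# Cluster the GSM samples per label set, optionally with constraints and a cached
# distance matrix (read from an HDF5 file when available).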
def gsm_clt():
global cfgr
if (opts.mltl):
pid = -1
else:
pid = opts.pid
print 'Process ID: %s' % pid
## Load data for GSM
Xs, Ys, labels, gsm2gse = load_data(type='gsm-clt', pid=pid, fmt=opts.fmt, spfmt=opts.spfmt)
labels = [lbs.as_matrix() if len(lbs.shape) > 1 and lbs.shape[1] > 1 else lbs.as_matrix().reshape((lbs.shape[0],)) for lbs in labels]
## Clustering for GSM
print 'Clustering for GSM ...'
for i, (X, y, c) in enumerate(zip(Xs, labels, Ys)):
# Transform the GEO IDs into constraints
# gse_ids = gsm2gse.loc[X.index]
# gseidc = label_binarize(gse_ids.as_matrix(), classes=gse_ids.gse_id.value_counts().index)
# c = np.hstack((c, gseidc))
print 'Dataset size: X:%s, labels:%s, constraints:%s' % (str(X.shape), str(y.shape), str(c.shape))
filt_names, clt_names, pl_names = [[] for j in range(3)]
pl_set = set([])
model_iter = gen_cbclt_models if opts.comb else gen_clt_models
if (opts.dend is not None):
y = label_binarize(y, classes=list(set([l for l in y.reshape((-1,)) if l != -1])))
# model_iter = gen_nnclt_models(input_dim=X.shape[1], output_dim=y.shape[1] if len(y.shape) == 2 else 1, constraint_dim=c.shape[1] if len(c.shape) == 2 else 1, batch_size=opts.bsize, other_clts=gsm_model_iter)
model_iter = gen_nnclt_models(input_dim=X.shape[1], output_dim=y.shape[1] if len(y.shape) == 2 else 1, constraint_dim=c.shape[1] if len(c.shape) == 2 else 1, batch_size=opts.bsize, other_clts=None)
try:
distmt = HDF5Matrix(opts.cache, 'distmt')
except Exception as e:
print e
distmt = None
model_param = dict(tuned=opts.best, glb_filtnames=filt_names, glb_cltnames=clt_names, is_fuzzy=opts.fuzzy, is_nn=False if opts.dend is None else True, constraint=c, distmt=distmt)
global_param = dict(comb=opts.comb, pl_names=pl_names, pl_set=pl_set)
txtclt.cross_validate(X, y, model_iter, model_param, kfold=opts.kfold, cfg_param=cfgr('bionlp.txtclt', 'cross_validate'), global_param=global_param, lbid=pid)
# txtclt.clustering(X, model_iter, model_param, cfg_param=cfgr('bionlp.txtclt', 'clustering'), global_param=global_param, lbid=pid)
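# Filter annotated entities by ontology label: gene/protein mentions are checked against
# HGNC, diseases against DNorm, and chemicals against RxNav; other labels pass through
# unchanged.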
def _filt_ent(entities, onto_lb):
filtered = []
txt = nlp.clean_txt('\n'.join([e['word'] for e in entities]))
loc = np.cumsum([0] + [len(e['word']) + 1 for e in entities])
if (onto_lb == 'PRGE'):
succeeded, trial_num = False, 0
while (not succeeded and trial_num < 20):
try:
df = hgnc.symbol_checker(txt).dropna()
succeeded = True
except RuntimeError as e:
trial_num += 1
time.sleep(5)
if (df.empty): return []
idx = [bisect.bisect_left(loc, txt.find(x)) for x in df['Input']]
elif (onto_lb == 'DISO'):
df = dnorm.annot_dss(txt)
if (df.empty): return []
idx = [bisect.bisect_left(loc, loc_s) for loc_s in df['start']]
elif (onto_lb == 'CHED'):
c = rxnav.RxNavAPI('drugs')
idx = [i for i, e in enumerate(entities) if len(c.call(name=nlp.clean_txt(e['word']))['concept_group']) > 0]
else:
return entities
return [entities[i] for i in idx if i < len(entities)]
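# Generate gene-expression signatures: pair GSM sample clusters, build basic signatures
# with platform/organism/tissue metadata, compute (differential) gene expression and drop
# high p-value pairs, annotate the signatures from the GEO documents, map them onto the
# relevant ontology, and finally clean the resulting tables.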
def gen_sgn(**kwargs):
global cfgr
common_cfg = cfgr('gsx_extrc', 'common')
sgn_cfg = cfgr('gsx_extrc', 'gen_sgn')
if (opts.mltl):
pid = -1
else:
pid = opts.pid
print 'Process ID: %s' % pid
if (opts.thrshd != 'mean' and opts.thrshd != 'min'):
opts.thrshd = float(ast.literal_eval(opts.thrshd))
if (len(sgn_cfg) > 0):
method = kwargs.setdefault('method', sgn_cfg['method'])
format = kwargs.setdefault('format', sgn_cfg['format'])
sample_dir = kwargs.setdefault('sample_dir', os.path.join('.', 'samples') if sgn_cfg['sample_dir'] is None else sgn_cfg['sample_dir'])
ge_dir = kwargs.setdefault('ge_dir', spdr.GEO_PATH if sgn_cfg['ge_dir'] is None else sgn_cfg['ge_dir'])
dge_dir = kwargs.setdefault('dge_dir', spdr.GEO_PATH if sgn_cfg['dge_dir'] is None else sgn_cfg['dge_dir'])
else:
print 'Configuration file is missing!'
sys.exit(-1)
## Load data for GSM and the association
Xs, Ys, labels, gsm2gse = load_data(type='sgn', pid=pid, fmt=opts.fmt, spfmt=opts.spfmt)
## Load GEO and GSM Documents
gse_docs, gsm_docs = spdr.get_geos(type='gse'), spdr.get_geos(type='gsm')
## Generating GSM Cluster Pairs
pair_dfs = []
for i, (X, Y, z) in enumerate(zip(Xs, Ys, labels)):
lbid = i if (pid == -1) else pid
io.inst_print('Generating pairs of GSM sample clusters for dataset %i ...' % lbid)
pair_df = helper._gsmclt_pair(X, Y, z, gsm2gse, lbid, thrshd=opts.thrshd, cache_path=opts.cache)
pair_dfs.append(pair_df)
## Generating Basic Signatures
presgn_dfs = []
for i, (X, Y, z, pair_df) in enumerate(zip(Xs, Ys, labels, pair_dfs)):
lbid = i if (pid == -1) else pid
sgn_fname = 'pre_sgn_%s.npz' % lbid
cachef = os.path.join(opts.cache, sgn_fname)
if (os.path.exists(cachef)):
io.inst_print('Reading cache for basic signatures of dataset %i ...' % lbid)
presgn_dfs.append(io.read_df(cachef, with_idx=True))
else:
io.inst_print('Generating the basic signatures for dataset %i ...' % lbid)
platforms, organisms, tissues = [[] for x in range(3)]
for gse_id, ctrl_str, pert_str in zip(pair_df['geo_id'], pair_df['ctrl_ids'], pair_df['pert_ids']):
gsm_doc_list = [gsm_docs[gsm_id][0] for gsm_id in ctrl_str.split('|') + pert_str.split('|')]
				# Label the terms in the GEO documents that are associated with each signature
pf_count, og_cout, ts_count = collections.Counter([doc['platform'] for doc in gsm_doc_list]).most_common(1), collections.Counter([doc['organism'] for doc in gsm_doc_list]).most_common(1), collections.Counter([doc['tissue'] for doc in gsm_doc_list if doc.has_key('tissue') and doc['tissue'] != ''] + [doc['tissue_type'] for doc in gsm_doc_list if doc.has_key('tissue_type') and doc['tissue_type'] != '']).most_common(1)
platforms.append(pf_count[0][0] if len(pf_count) > 0 else '')
organisms.append(og_cout[0][0] if len(og_cout) > 0 else '')
tissues.append(ts_count[0][0] if len(ts_count) > 0 else '')
columns = ['platform', 'organism', 'tissue']
preannot_df = pd.DataFrame.from_items([(k, v) for k, v in zip(columns, [platforms, organisms, tissues])], columns=columns)
preannot_df.index = pair_df.index
presgn_df = pd.concat([pair_df, preannot_df], axis=1, join_axes=[pair_df.index], copy=False)
# Set the index
presgn_df.index.name = 'id'
presgn_df.index = ['%s:%i' % (spdr.LABEL2ID[ds_lb], x) for x in range(presgn_df.shape[0])]
io.write_df(presgn_df, 'pre_sgn_%s.npz' % lbid, with_idx=True)
presgn_df.to_excel('pre_sgn_%s.xlsx' % lbid, encoding='utf8')
presgn_dfs.append(presgn_df)
# Calculate the Differential Gene Expression
ds_lb = gse_docs[pair_df['geo_id'][0]][1][0]
_label, _method = ds_lb.lower().replace(' ', '_'), method.lower().replace(' ', '_')
sample_path, ge_path, dge_path, dge_cache_path = os.path.join(sample_dir, format, _label, 'samples'), os.path.join(ge_dir, _label), os.path.join(dge_dir, _method, _label), os.path.join(dge_dir, 'cache', _label)
dge_filter_path, dge_cache_filter_path = os.path.join(dge_path, 'filtered'), os.path.join(dge_cache_path, 'filtered')
		fs.mkdir(dge_filter_path)
		fs.mkdir(os.path.join(dge_cache_filter_path, _method))
io.inst_print('Calculating the gene expression for dataset %i ...' % lbid)
helper._sgn2ge(presgn_dfs[-1], sample_path, ge_path, format=format)
io.inst_print('Calculating the differential gene expression for dataset %i ...' % lbid)
dge_dfs = helper._sgn2dge(presgn_dfs[-1], method, ge_path, dge_path, dge_cache_path)
# Filter the pairs with high p-value
io.inst_print('Filtering the signatures for dataset %i according to the p-value of differential gene expression ...' % lbid)
pvalues = np.array([np.sort(dge_df['pvalue'].values)[:5].mean() for dge_df in dge_dfs])
selection = pvalues < (sgn_cfg['pval_thrshd'] if sgn_cfg.has_key('pval_thrshd') and sgn_cfg['pval_thrshd'] is not None else 0.05)
		if (not all(selection)):
presgn_dfs[-1] = presgn_dfs[-1][selection]
orig_ids = np.arange(pvalues.shape[0])[selection] # consistent with the index of original dge and the pre_sgn_x file
orig_map = pd.DataFrame(orig_ids.reshape((-1,1)), index=presgn_dfs[-1].index, columns=['orig_idx'])
io.write_df(orig_map, 'orig_map.npz', with_idx=True)
for idx, orig_idx in enumerate(orig_ids):
dge_src = os.path.join(dge_path, 'dge_%i.npz' % orig_idx)
dge_dst = os.path.join(dge_filter_path, 'dge_%i.npz' % idx)
if (not os.path.exists(dge_dst)):
copyfile(dge_src, dge_dst)
dge_cache_src = os.path.join(dge_cache_path, _method, '%i.npz' % orig_idx)
dge_cache_dst = os.path.join(dge_cache_filter_path, _method, '%i.npz' % idx)
if (not os.path.exists(dge_cache_dst)):
copyfile(dge_cache_src, dge_cache_dst)
# presgn_dfs[-1].index = ['%s:%i' % (spdr.LABEL2ID[ds_lb], x) for x in range(presgn_dfs[-1].shape[0])] # consistent with the index of filtered dge and the signature_x file
## Annotating Signatures
top_k = 3
cache_path = os.path.join(spdr.GEO_PATH, 'annot')
txt_fields = [['title', 'summary', 'keywords'], ['title', 'description', 'source', 'organism', 'treat_protocol', 'trait']]
txtfield_importance = {'title':8, 'summary':4, 'keywords':7, 'description':4, 'source':5, 'organism':5, 'treat_protocol':9, 'trait':5}
sgn_dfs, annot_lists, common_annots, annot_dicts = [[] for x in range(4)]
for i, (X, Y, z, presgn_df) in enumerate(zip(Xs, Ys, labels, presgn_dfs)):
lbid = i if (pid == -1) else pid
sgn_fname = 'signature_%s.npz' % lbid
cachef = os.path.join(opts.cache, sgn_fname)
if (os.path.exists(cachef)):
io.inst_print('Reading cache for annotated signatures of dataset %i ...' % lbid)
annot_list = io.read_obj(os.path.join(opts.cache, 'annot_list_%i.pkl' % lbid))
common_annot = io.read_obj(os.path.join(opts.cache, 'common_annot_%i.pkl' % lbid))
annot_dict = io.read_obj(os.path.join(opts.cache, 'annot_dict_%i.pkl' % lbid))
if (annot_list is not None and common_annot is not None and annot_dict is not None):
annot_lists.append(annot_list)
common_annots.append(common_annot)
annot_dicts.append(annot_dict)
sgn_dfs.append(io.read_df(cachef, with_idx=True))
continue
io.inst_print('Annotating the signatures for dataset %i ...' % lbid)
common_annot_list, annot_list = [[] for x in range(2)]
for gse_id, ctrl_str, pert_str in zip(presgn_df['geo_id'], presgn_df['ctrl_ids'], presgn_df['pert_ids']):
gsm_annotres, annot_ents, annot_terms, annot_weights = [], {}, {}, {}
gsm_list = ctrl_str.split('|') + pert_str.split('|')
gse_doc, gsm_doc_list = gse_docs[gse_id][0], [gsm_docs[gsm_id][0] for gsm_id in gsm_list]
txt_field_maps = [0] + [1] * len(gsm_doc_list)
# Annotate the GSE document
gse_annotres = helper._annot_sgn(gse_id, gse_doc, txt_fields[0], cache_path=cache_path)
# Annotate the GSM document
for geo_id, geo_doc in zip(gsm_list, gsm_doc_list):
gsm_annotres.append(helper._annot_sgn(geo_id, geo_doc, txt_fields[1], cache_path=cache_path))
annot_list.append([gse_annotres] + gsm_annotres)
# Extract the annotated entities from the results, and classify them based on the annotation (modifier) type
for annotres, tfmap in zip(annot_list[-1], txt_field_maps):
for annot_gp, txt_field in zip(annotres, txt_fields[tfmap]):
for annotype, entities in annot_gp.iteritems():
annot_ent = [':'.join(entity['ids'] + [entity['word']]) for entity in entities]
annot_ents.setdefault(annotype, []).extend(annot_ent)
annot_weights.setdefault(annotype, []).extend([txtfield_importance[txt_field]] * len(annot_ent))
annot_mdf = [entity['modifier'] for entity in entities if entity['modifier'] != '']
annot_ents.setdefault('mdf_'+annotype, []).extend(annot_mdf)
annot_weights.setdefault('mdf_'+annotype, []).extend([txtfield_importance[txt_field]] * len(annot_mdf))
			# Keep the top-k most common entities for each annotation type (top 2 for modifier types)
for annotype, entities in annot_ents.iteritems():
if (len(entities) == 0): continue
annot_weight = dstclc.normdist(np.array(annot_weights[annotype]))
ent_array = np.array(entities)
annot_count = func.sorted_tuples([(k, annot_weight[np.where(ent_array == k)[0]].sum()) for k in set(entities)], key_idx=1)[::-1]
if (annotype.startswith('mdf_')):
# annot_count = collections.Counter(entities).most_common(2)
annot_count = annot_count[:2]
if (len(annot_count) > 1 and annot_count[0][1] == annot_count[1][1]):
annot_text = ' & '.join(sorted(zip(*annot_count[:2])[0]))
elif len(annot_count) > 0:
annot_text = annot_count[0][0]
else:
annot_text = ''
annot_terms[annotype] = [annot_text]
else:
# annot_count = collections.Counter(entities).most_common(top_k)
annot_count = annot_count[:top_k]
annot_terms[annotype] = [x[0].split(':')[-1] for x in annot_count] if len(annot_count) > 0 else ['']
if (len(annot_terms) == 0):
print 'Unable to annotate signatures for GEO document %s !' % gse_id
common_annot_list.append(annot_terms)
annot_lists.append(annot_list)
io.write_obj(annot_list, 'annot_list_%i.pkl' % lbid)
common_annots.append(common_annot_list)
io.write_obj(common_annot_list, 'common_annot_%i.pkl' % lbid)
if (len(common_annot_list) == 0):
print 'Unable to annotate signatures, please check your network!'
continue
print 'Generating the annotated signatures for dataset %i ...' % lbid
# Combine all the annotation types
annotypes = list(set(func.flatten_list([x.keys() for x in common_annot_list])))
# Make a unified annotation dictionary
annot_dict = dict([(annotype, []) for annotype in annotypes])
for annotype in annotypes:
annot_dict[annotype].extend([annot_terms.setdefault(annotype, [''])[0] for annot_terms in common_annot_list])
annot_dicts.append(annot_dict)
io.write_obj(annot_dict, 'annot_dict_%i.pkl' % lbid)
annot_df = pd.DataFrame.from_items([(k, v) for k, v in annot_dict.iteritems() if len(v) == presgn_df.shape[0]])
annot_df.index = presgn_df.index
sgn_df = pd.concat([presgn_df, annot_df], axis=1, join_axes=[presgn_df.index], copy=False)
io.write_df(sgn_df, 'signature_%s.npz' % lbid, with_idx=True)
sgn_df.to_excel('signature_%s.xlsx' % lbid, encoding='utf8')
sgn_dfs.append(sgn_df)
## Annotating the Gene, Disease, and Drug ontology
postsgn_dfs, ontoid_cols, ontolb_cols = [[] for x in range(3)]
for i, (X, Y, z, annot_list, common_annot_list, sgn_df) in enumerate(zip(Xs, Ys, labels, annot_lists, common_annots, sgn_dfs)):
lbid = i if (pid == -1) else pid
sgn_fname = 'post_sgn_%s.npz' % lbid
cachef = os.path.join(opts.cache, sgn_fname)
if (os.path.exists(cachef)):
io.inst_print('Reading cache for ontology-annotated signatures of dataset %i ...' % lbid)
postsgn_dfs.append(io.read_df(cachef, with_idx=True))
continue
io.inst_print('Annotating the ontology for dataset %i ...' % lbid)
# Read the ontology database
ds_lb = gse_docs[sgn_df['geo_id'][0]][1][0]
onto_lb, ontodb_name = spdr.LABEL2ONTO[ds_lb], spdr.LABEL2DB[ds_lb]
onto_lang, idns, prdns, idprds, lbprds = spdr.DB2LANG[ontodb_name], getattr(ontology, spdr.DB2IDNS[ontodb_name]), [(ns.lower(), getattr(ontology, ns)) for ns in dict(spdr.DB2PRDS[ontodb_name]['idprd']).keys()], dict([((prdn[0].lower(), prdn[1]), '_'.join(prdn)) for prdn in spdr.DB2PRDS[ontodb_name]['idprd']]), dict([((prdn[0].lower(), prdn[1]), '_'.join(prdn)) for prdn in spdr.DB2PRDS[ontodb_name]['lbprds']])
ontodb_path = os.path.join(spdr.ONTO_PATH, ontodb_name)
# Get the ontology graph
# ontog = ontology.get_db_graph(ontodb_path, db_name=ontodb_name, db_type='SQLAlchemy') # from rdflib db
ontog = sparql.SPARQL('http://localhost:8890/%s/query' % ontodb_name, use_cache=common_cfg.setdefault('memcache', False)) # from Jena TDB
ontoid_cols.append(spdr.DB2IDN[ontodb_name])
ontolb_cols.append(spdr.DB2ONTON[ontodb_name])
ontoids, onto_labels = [[] for x in range(2)]
for gse_id, ctrl_str, pert_str, annotres_list, common_annot in zip(sgn_df['geo_id'], sgn_df['ctrl_ids'], sgn_df['pert_ids'], annot_list, common_annot_list):
gsm_list = ctrl_str.split('|') + pert_str.split('|')
gse_doc, gsm_doc_list = gse_docs[gse_id][0], [gsm_docs[gsm_id][0] for gsm_id in gsm_list]
annot_tkns, optional_tkns = [], []
txt_field_maps = [0] + [1] * len(gsm_doc_list)
# Only consider the summarized text fields of each GEO document
txt_lengths, txt_weights, opt_txt_lengths, opt_txt_weights = [[] for x in range(4)] # Record the boundary of different text fields and their weights
for annotres, geo_doc, tfmap in zip(annotres_list, [gse_doc] + gsm_doc_list, txt_field_maps):
for annot_gp, txt, txtfield in zip(annotres, [geo_doc[txt_field] for txt_field in txt_fields[tfmap]], txt_fields[tfmap]):
if (txt.isspace()): continue
txt_length = 0
init_tokens, locs = nlp.tokenize(txt, model='word', ret_loc=True)
if (locs is None or len(locs) == 0): continue
tokens, locs = nlp.del_punct(init_tokens, location=locs)
if (locs is None or len(locs) == 0): continue
start_loc, end_loc = zip(*locs)
entities = annot_gp.setdefault(onto_lb, [])
if (len(entities) > 0):
# entities = _filt_ent(entities, onto_lb)
# Only consider the top k most common annotation
# for entity in [x for x in entities if x['word'] in common_annot[onto_lb]]:
for entity in [x for x in entities]:
annot_tkns.append(entity['word'])
txt_length += (len(entity['word']) + 1)
# left_tkn_id = bisect.bisect_left(list(start_loc), int(entity['offset'])) - 1
# right_tkn_id = bisect.bisect_left(list(start_loc), int(entity['offset']) + len(entity['word']))
# print left_tkn_id, right_tkn_id, entity['offset'], locs
# Also consider a sliding window of the annotation terms to avoid inaccuracy of the annotation tool
# annot_tkns.extend([tokens[max(0, left_tkn_id)], entity['word'], entity['word'], entity['word'], tokens[min(len(tokens) - 1, right_tkn_id)]])
txt_lengths.append(txt_length)
txt_weights.append(txtfield_importance[txtfield])
if (onto_lb == 'PRGE' or onto_lb == 'DISO'):
optional_tkns.append(txt)
opt_txt_lengths.append(len(txt) + 1)
opt_txt_weights.append(txtfield_importance[txtfield])
annot_txt = ' '.join(annot_tkns)
if (annot_txt.isspace()):
annot_txt = ' '.join(optional_tkns).strip()
txt_lengths, txt_weights = opt_txt_lengths, opt_txt_weights
if (annot_txt.isspace()):
ontoids.append('')
onto_labels.append('')
continue
# Map the annotations to the ontology
onto_annotres = annot.annotonto(nlp.clean_txt(annot_txt), ontog, lang=onto_lang, idns=idns, prdns=prdns, idprds=idprds, dominant=True, lbprds=lbprds)
# Complementary of the ontology mapping using biological entities identification method
if (len(onto_annotres) == 0):
annot_txt = ' '.join(optional_tkns).strip()
txt_lengths, txt_weights = opt_txt_lengths, opt_txt_weights
if (onto_lb == 'PRGE'):
hgnc_cachef = os.path.join(spdr.HGNC_PATH, '%s_hgnc.npz' % gse_id)
if (os.path.exists(hgnc_cachef)):
annot_df = io.read_df(hgnc_cachef)
else:
annot_df = hgnc.symbol_checker(annot_txt, synonyms=True).dropna()
io.write_df(annot_df, hgnc_cachef, compress=True)
onto_annotres = zip(annot_df['HGNC ID'], annot_df['Approved symbol'], annot_df['Input'], map(func.find_substr(annot_txt), annot_df['Input']))
ontoid_cols[-1] = 'hgnc_id'
if (onto_lb == 'DISO'):
dnorm_cachef = os.path.join(spdr.DNORM_PATH, '%s_dnorm.npz' % gse_id)
if (os.path.exists(dnorm_cachef)):
annot_df = io.read_df(dnorm_cachef)
else:
annot_df = dnorm.annot_dss(nlp.clean_txt(annot_txt))
io.write_df(annot_df, dnorm_cachef, compress=True)
locations = zip(annot_df['start'], annot_df['end'])
onto_annotres = zip(annot_df['cid'], annot_df['concept'], [annot_txt[start:end] for start, end in locations], locations)
ontoid_cols[-1] = 'dnorm_id'
onto_annot_res = zip(*onto_annotres)
if (len(onto_annotres) == 0 or len(onto_annot_res) == 0 or len(onto_annot_res[0]) == 0):
ontoids.append('')
onto_labels.append('')
continue
			ids, onto_lbs, tokens, locs = onto_annot_res	# avoid shadowing the dataset-level 'labels' list
			annotres_dict = dict(zip(ids, onto_lbs))
txt_bndry = np.cumsum(txt_lengths)
txt_normw = dstclc.normdist(np.array(txt_weights, dtype='float32'))
token_weights = np.array([txt_normw[txt_bndry.searchsorted(loc[0], side='right')] if loc[0] < len(annot_txt) else 0 for loc in locs])
id_array = np.array(ids)
annot_count = [(k, token_weights[np.where(id_array == k)[0]].sum()) for k in set(ids)]
			# Several annotations may share the same aggregated weight
			# annot_count = collections.Counter(ids).most_common(10)
			# Find the annotations with the highest weight, sort them alphabetically, and pick the first one
max_count = max(map(operator.itemgetter(1), annot_count))
annot_ids = sorted([x[0] for x in func.sorted_tuples(annot_count, key_idx=1)[::-1] if x[1] == max_count])
annot_length = [(x, len(str(x))) for x in annot_ids]
annot_id = sorted([x[0] for x in func.sorted_tuples(annot_length, key_idx=1)])[0]
ontoids.append(annot_id)
onto_labels.append(annotres_dict[annot_id])
annot_df = pd.DataFrame.from_items([(ontoid_cols[-1], ontoids), (ontolb_cols[-1], onto_labels)])
annot_df.index = sgn_df.index
postsgn_df = pd.concat([sgn_df, annot_df], axis=1, join_axes=[sgn_df.index], copy=False)
# postsgn_df.index.name = 'id'
io.write_df(postsgn_df, 'post_sgn_%s.npz' % lbid, with_idx=True)
postsgn_df.to_excel('post_sgn_%s.xlsx' % lbid, encoding='utf8')
postsgn_dfs.append(postsgn_df)
## Signature Filtering and Cleaning
for i, postsgn_df in enumerate(postsgn_dfs):
lbid = i if (pid == -1) else pid
io.inst_print('Cleaning the signatures for dataset %i ...' % lbid)
ds_lb = gse_docs[postsgn_df['geo_id'][0]][1][0]
_label = ds_lb.lower().replace(' ', '_')
cln_sgn_df = postsgn_df.drop(postsgn_df.index[np.where(postsgn_df[ontolb_cols[i]] == '')[0]], axis=0)
# Create cell type column
cln_sgn_df['ANAT'] = [' '.join([mdf, x]) if x.startswith('cell') else x for mdf, x in zip(map(nlp.clean_txt, cln_sgn_df['mdf_ANAT'].fillna('')), map(nlp.clean_txt, cln_sgn_df['ANAT'].fillna('')))]
cln_sgn_df.rename(columns={'ANAT': 'cell_type'}, inplace=True)
cln_sgn_df.drop('mdf_ANAT', axis=1, inplace=True)
# Delete other useless columns
threshold = 0.5 * cln_sgn_df.shape[0]
del_cols = [col for col in cln_sgn_df.columns if np.where(cln_sgn_df[col] != '')[0].shape[0] < threshold]
cln_sgn_df.drop(del_cols, axis=1, inplace=True)
io.write_df(cln_sgn_df, '%s.npz' % _label, with_idx=True)
cln_sgn_df.to_excel('%s.xlsx' % _label, encoding='utf8')
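# Dispatch hyperparameter tuning to the GSE and/or GSM datasets depending on `type`.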
def tuning(type='gse'):
if (type == 'gse'):
tuning_gse()
elif (type == 'gsm'):
tuning_gsm()
else:
tuning_gse()
tuning_gsm()
def tuning_gse():
from sklearn.model_selection import KFold
if (opts.mltl):
pid = -1
else:
pid = opts.pid
print 'Process ID: %s' % pid
## Load data for GSE
gse_X, gse_Y = load_data(type='gse', pid=pid, fmt=opts.fmt, spfmt=opts.spfmt)
gse_X = gse_X.as_matrix()
if (opts.mltl):
gse_Y = gse_Y.as_matrix()
if (len(gse_Y.shape) == 1 or gse_Y.shape[1] == 1):
gse_Y = gse_Y.reshape((gse_Y.shape[0],))
else:
gse_Y = gse_Y.as_matrix().reshape((gse_Y.shape[0],))
## Parameter tuning for GSE
print 'Parameter tuning for GSE is starting ...'
ext_params = dict(folds=opts.kfold, n_iter=opts.maxt)
params_generator = gen_mdl_params() if opts.dend is None else gen_nnmdl_params(gse_X.shape[1], gse_Y.shape[1] if len(gse_Y.shape) > 1 else 1)
for mdl_name, mdl, params in params_generator:
params.update(ext_params)
print 'Tuning hyperparameters for %s' % mdl_name
pt_result = txtclf.tune_param_optunity(mdl_name, mdl, gse_X, gse_Y, scoring='f1', optfunc='max', solver=opts.solver.replace('_', ' '), params=params, mltl=opts.mltl, avg='micro' if opts.avg == 'all' else opts.avg, n_jobs=opts.np)
io.write_npz(dict(zip(['best_params', 'best_score', 'score_avg_cube', 'score_std_cube', 'dim_names', 'dim_vals'], pt_result)), 'gse_%s_param_tuning_for_%s_%s' % (opts.solver.lower().replace(' ', '_'), mdl_name.replace(' ', '_').lower(), 'all' if (pid == -1) else pid))
def tuning_gsm():
from sklearn.model_selection import KFold
if (opts.mltl):
pid = -1
else:
pid = opts.pid
print 'Process ID: %s' % pid
## Load data for GSM
gsm_Xs, gsm_Ys = load_data(type='gsm-clf', pid=pid, fmt=opts.fmt, spfmt=opts.spfmt)
gsm_Ys = [Y.as_matrix() if len(Y.shape) > 1 and Y.shape[1] > 1 else Y.as_matrix().reshape((Y.shape[0],)) for Y in gsm_Ys]
## Parameter tuning for GSM
print 'Parameter tuning for GSM is starting ...'
for i, (gsm_X, gsm_y) in enumerate(zip(gsm_Xs, gsm_Ys)):
gsm_X = gsm_X.as_matrix()
ext_params = dict(folds=opts.kfold, n_iter=opts.maxt)
params_generator = gen_mdl_params() if opts.dend is None else gen_nnmdl_params(gsm_X.shape[1], gsm_y.shape[1] if len(gsm_y.shape) > 1 else 1)
for mdl_name, mdl, params in params_generator:
params.update(ext_params)
print 'Tuning hyperparameters for %s in label %i' % (mdl_name, i)
pt_result = txtclf.tune_param_optunity(mdl_name, mdl, gsm_X, gsm_y, scoring='f1', optfunc='max', solver=opts.solver.replace('_', ' '), params=params, mltl=opts.mltl, avg='micro' if opts.avg == 'all' else opts.avg, n_jobs=opts.np)
			io.write_npz(dict(zip(['best_params', 'best_score', 'score_avg_cube', 'score_std_cube', 'dim_names', 'dim_vals'], pt_result)), 'gsm_%s_param_tuning_for_%s_%s' % (opts.solver.lower().replace(' ', '_'), mdl_name.replace(' ', '_').lower(), i if opts.mltl else '_'.join([str(int(pid / 2)), str(int(pid % 2))])))
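# Automatic model selection with auto-sklearn for the GSE and/or GSM datasets.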
def autoclf(type='gse'):
if (type == 'gse'):
autoclf_gse()
elif (type == 'gsm'):
autoclf_gsm()
else:
autoclf_gse()
autoclf_gsm()
def autoclf_gse():
from autosklearn.classification import AutoSklearnClassifier
global cfgr
if (opts.mltl):
pid = -1
else:
pid = opts.pid
print 'Process ID: %s' % pid
## Load data for GSE
gse_X, gse_Y = load_data(type='gse', pid=pid, fmt=opts.fmt, spfmt=opts.spfmt)
if (opts.mltl):
gse_Y = gse_Y.as_matrix()
if (len(gse_Y.shape) == 1 or gse_Y.shape[1] == 1):
gse_Y = gse_Y.reshape((gse_Y.shape[0],))
else:
gse_Y = gse_Y.as_matrix().reshape((gse_Y.shape[0],)) if (len(gse_Y.shape) == 2 and gse_Y.shape[1] == 1) else gse_Y.as_matrix()
## Automatic model selection for GSE
print 'Automatic model selection for GSE'
autoclf = AutoSklearnClassifier()
autoclf.fit(gse_X, gse_Y)
io.write_obj(autoclf, 'autoclf_gse.mdl')
print 'Selected model:'
	print autoclf.show_models()
def autoclf_gsm():
from autosklearn.classification import AutoSklearnClassifier
global cfgr
if (opts.mltl):
pid = -1
else:
pid = opts.pid
print 'Process ID: %s' % pid
## Load data for GSM
gsm_Xs, gsm_Ys = load_data(type='gsm-clf', pid=pid, fmt=opts.fmt, spfmt=opts.spfmt)
gsm_Ys = [Y.as_matrix() if len(Y.shape) > 1 and Y.shape[1] > 1 else Y.as_matrix().reshape((Y.shape[0],)) for Y in gsm_Ys]
## Automatic model selection for GSM
for i, (X, Y) in enumerate(zip(gsm_Xs, gsm_Ys)):
print 'Automatic model selection for GSM in label %i' % i
autoclf = AutoSklearnClassifier()
		autoclf.fit(X, Y)
io.write_obj(autoclf, 'autoclf_gsm_%i.mdl' % i)
print 'Selected model for label %i:' % i
		print autoclf.show_models()
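# End-to-end demo: download the prepared datasets and caches from Mendeley Data, run
# cross-validation for GSE/GSM classification, cluster the GSM samples, generate the
# signatures, build the signature similarity network, and plot the Circos figure.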
def demo():
import urllib, shutil
global cfgr
url_prefix = 'https://data.mendeley.com/datasets/y7gnb79gfb/2/files/'
common_cfg = cfgr('gsx_extrc', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
fs.mkdir('data')
io.inst_print('Downloading data for GSE ...')
urllib.urlretrieve (url_prefix+'570cb239-793a-4a47-abf2-979fe432a2b4/udt_gse_X.npz', 'data/gse_X.npz')
urllib.urlretrieve (url_prefix+'a5b3e6fd-ef9f-4157-9d9b-a49c240a8b77/gse_Y.npz', 'data/gse_Y.npz')
io.inst_print('Finish downloading data for GSE!')
gsc.DATA_PATH = 'data'
orig_wd = os.getcwd()
## Cross-validation for GSE
gse_X, gse_Y = load_data(type='gse', pid=-1, fmt=opts.fmt, spfmt=opts.spfmt)
gse_Y = gse_Y.as_matrix()
new_wd = os.path.join(orig_wd, 'gse_cv')
fs.mkdir(new_wd)
os.chdir(new_wd)
def gse_model_iter(tuned, glb_filtnames, glb_clfnames):
yield 'UDT-RF', Pipeline([('clf', build_model(RandomForestClassifier, 'Classifier', 'Random Forest GSE', tuned=True, pr=pr, mltl=True, mltp=True, n_jobs=1, random_state=0))])
txtclf.cross_validate(gse_X, gse_Y, gse_model_iter, model_param=dict(tuned=True, glb_filtnames=[], glb_clfnames=[]), avg='micro', kfold=5, cfg_param=cfgr('bionlp.txtclf', 'cross_validate'), global_param=dict(comb=True, pl_names=[], pl_set=set([])), lbid=-1)
os.chdir(orig_wd)
## Cross-validation for GSM
io.inst_print('Downloading data for GSM ...')
urllib.urlretrieve (url_prefix+'8731e767-20b1-42ba-b3cc-38573df643c9/udt_gsm_X_0.npz', 'data/gsm_X_0.npz')
urllib.urlretrieve (url_prefix+'b4139560-f1f7-4f0c-9591-7214ffd295dc/udt_gsm_X_1.npz', 'data/gsm_X_1.npz')
urllib.urlretrieve (url_prefix+'fb510fa1-6a0b-4613-aa9d-3b3728acae06/udt_gsm_X_2.npz', 'data/gsm_X_2.npz')
urllib.urlretrieve (url_prefix+'18316de5-1478-4503-adcc-7f0fff581470/gsm_y_0.npz', 'data/gsm_y_0.npz')
urllib.urlretrieve (url_prefix+'0b0b84c0-9796-47e2-92e8-7a3a621a8cfe/gsm_y_1.npz', 'data/gsm_y_1.npz')
urllib.urlretrieve (url_prefix+'ea1acea3-4a99-479b-b5af-0a1f66a163c4/gsm_y_2.npz', 'data/gsm_y_2.npz')
urllib.urlretrieve (url_prefix+'f26b66ae-4887-451d-8067-933cacb4dad3/gsm_lb_0.npz', 'data/gsm_lb_0.npz')
urllib.urlretrieve (url_prefix+'59fd8bd2-405c-4e85-9989-ed79bac4b676/gsm_lb_1.npz', 'data/gsm_lb_1.npz')
urllib.urlretrieve (url_prefix+'9c08969d-579f-4695-b0a1-94226e0df495/gsm_lb_2.npz', 'data/gsm_lb_2.npz')
io.inst_print('Finish downloading data for GSM!')
gsm_Xs, gsm_Ys = load_data(type='gsm-clf', pid=-1, fmt=opts.fmt, spfmt=opts.spfmt)
gsm_Ys = [Y.as_matrix() if len(Y.shape) > 1 and Y.shape[1] > 1 else Y.as_matrix().reshape((Y.shape[0],)) for Y in gsm_Ys]
new_wd = os.path.join(orig_wd, 'gsm_cv')
fs.mkdir(new_wd)
os.chdir(new_wd)
orig_subwd = os.getcwd()
for i, (X, Y) in enumerate(zip(gsm_Xs, gsm_Ys)):
# Switch to sub-working directory
new_wd = os.path.join(orig_subwd, str(i))
fs.mkdir(new_wd)
os.chdir(new_wd)
def gsm_model_iter(tuned, glb_filtnames, glb_clfnames, **kwargs):
yield 'UDT-RF', Pipeline([('clf', build_model(RandomForestClassifier, 'Classifier', 'Random Forest GSM%i'%i, tuned=True, pr=pr, mltl=True, mltp=True, n_jobs=1, random_state=0))])
txtclf.cross_validate(X, Y, gsm_model_iter, model_param=dict(tuned=True, glb_filtnames=[], glb_clfnames=[]), avg='micro', kfold=5, cfg_param=cfgr('bionlp.txtclf', 'cross_validate'), global_param=dict(comb=True, pl_names=[], pl_set=set([])), lbid=i)
os.chdir(orig_subwd)
os.chdir(orig_wd)
## Clustering for GSM
fs.mkdir('orig')
gsc.DATA_PATH = 'orig'
io.inst_print('Downloading data for GSM clustering ...')
urllib.urlretrieve (url_prefix+'c1f2eb4d-e859-4502-ab48-de6bdc237d33/orig_gsm_X_0.npz', 'orig/gsm_X_0.npz')
urllib.urlretrieve (url_prefix+'e0630d62-b6ea-4511-bb7f-37b25e70a563/orig_gsm_X_1.npz', 'orig/gsm_X_1.npz')
urllib.urlretrieve (url_prefix+'63d8a8e0-8025-407b-be40-99a31bf8b75f/orig_gsm_X_2.npz', 'orig/gsm_X_2.npz')
urllib.urlretrieve (url_prefix+'3a28e780-2724-4b20-bb0d-ec808ef6cf58/gsm2gse.npz', 'orig/gsm2gse.npz')
shutil.copy2('data/gsm_y_0.npz', 'orig/gsm_y_0.npz')
shutil.copy2('data/gsm_y_1.npz', 'orig/gsm_y_1.npz')
shutil.copy2('data/gsm_y_2.npz', 'orig/gsm_y_2.npz')
shutil.copy2('data/gsm_lb_0.npz', 'orig/gsm_lb_0.npz')
shutil.copy2('data/gsm_lb_1.npz', 'orig/gsm_lb_1.npz')
shutil.copy2('data/gsm_lb_2.npz', 'orig/gsm_lb_2.npz')
urllib.urlretrieve (url_prefix+'18316de5-1478-4503-adcc-7f0fff581470/gsm_y_0.npz', 'orig/gsm_y_0.npz')
urllib.urlretrieve (url_prefix+'0b0b84c0-9796-47e2-92e8-7a3a621a8cfe/gsm_y_1.npz', 'orig/gsm_y_1.npz')
urllib.urlretrieve (url_prefix+'ea1acea3-4a99-479b-b5af-0a1f66a163c4/gsm_y_2.npz', 'orig/gsm_y_2.npz')
urllib.urlretrieve (url_prefix+'f26b66ae-4887-451d-8067-933cacb4dad3/gsm_lb_0.npz', 'orig/gsm_lb_0.npz')
urllib.urlretrieve (url_prefix+'59fd8bd2-405c-4e85-9989-ed79bac4b676/gsm_lb_1.npz', 'orig/gsm_lb_1.npz')
urllib.urlretrieve (url_prefix+'9c08969d-579f-4695-b0a1-94226e0df495/gsm_lb_2.npz', 'orig/gsm_lb_2.npz')
io.inst_print('Finish downloading data for GSM clustering!')
gsm_Xs, gsm_Ys, clt_labels, gsm2gse = load_data(type='gsm-clt', pid=-1, fmt=opts.fmt, spfmt=opts.spfmt)
	# Select a subset of GSE series for the demo
slct_gse = ['GSE14024', 'GSE4302', 'GSE54464', 'GSE5230', 'GSE6015', 'GSE4262', 'GSE11506', 'GSE8597', 'GSE43696', 'GSE13984', 'GSE1437', 'GSE12446', 'GSE41035', 'GSE5225', 'GSE53394', 'GSE30174', 'GSE16683', 'GSE6476', 'GSE1988', 'GSE32161', 'GSE16032', 'GSE2362', 'GSE46924', 'GSE4668', 'GSE4587', 'GSE1413', 'GSE3325', 'GSE484', 'GSE20054', 'GSE51207', 'GSE23702', 'GSE2889', 'GSE2880', 'GSE11237', 'GSE3189', 'GSE52711', 'GSE5007', 'GSE5315', 'GSE55760', 'GSE6878', 'GSE9118', 'GSE10748', 'GSE31773', 'GSE54657', 'GSE27011', 'GSE2600', 'GSE16874', 'GSE1468', 'GSE1566', 'GSE3868', 'GSE52452', 'GSE60413', 'GSE35765', 'GSE55945', 'GSE6887', 'GSE1153', 'GSE26309', 'GSE3418', 'GSE18965', 'GSE30076', 'GSE33223', 'GSE2606', 'GSE26910', 'GSE26834', 'GSE1402', 'GSE29077', 'GSE2195', 'GSE4768', 'GSE2236', 'GSE39452', 'GSE13044', 'GSE1588', 'GSE4514', 'GSE24592', 'GSE31280', 'GSE2018']
slct_idx = gsm2gse.index[[i for i, x in enumerate(gsm2gse['gse_id']) if x in slct_gse]]
slct_idx_set = set(slct_idx)
new_wd = os.path.join(orig_wd, 'gsm_clt')
fs.mkdir(new_wd)
os.chdir(new_wd)
orig_subwd = os.getcwd()
for i, (X, y, c) in enumerate(zip(gsm_Xs, clt_labels, gsm_Ys)):
idx = np.array(list(set(X.index) & slct_idx_set))
X, y, c = X.loc[idx], y.loc[idx], c.loc[idx]
y = y.as_matrix() if len(y.shape) > 1 and y.shape[1] > 1 else y.as_matrix().reshape((y.shape[0],))
c = c.as_matrix()
# Switch to sub-working directory
new_wd = os.path.join(orig_subwd, str(i))
fs.mkdir(new_wd)
os.chdir(new_wd)
def clt_model_iter(tuned, glb_filtnames, glb_cltnames=[], **kwargs):
yield 'GESgnExt', Pipeline([('clt', kallima.Kallima(metric='euclidean', method='mstcut', cut_method='normcut', cut_step=0.01, cns_ratio=0.5, nn_method='rnn', nn_param=0.5, max_cltnum=1500, coarse=0.4, rcexp=1, cond=0.3, cross_merge=False, merge_all=False, save_g=True, n_jobs=opts.np))])
txtclt.clustering(X, clt_model_iter, model_param=dict(tuned=False, glb_filtnames=[], glb_cltnames=[]), cfg_param=cfgr('bionlp.txtclt', 'clustering'), global_param=dict(comb=True, pl_names=[], pl_set=set([])), lbid=i)
os.chdir(orig_subwd)
os.chdir(orig_wd)
fs.mkdir(opts.cache)
for i in range(3):
for fpath in fs.listf('gsm_clt/%i'%i, 'clt_pred_.*.npz', full_path=True):
shutil.copy2(fpath, os.path.join(opts.cache, os.path.basename(fpath)))
## Signature Generation
import zipfile, tarfile
fs.mkdir('demo/xml')
fs.mkdir('gedata')
fs.mkdir('dge')
io.inst_print('Downloading the demo data ...')
urllib.urlretrieve (url_prefix+'83581784-4e92-4a45-97da-7204a2c51272/demo_gse_doc.pkl', 'demo/xml/gse_doc.pkl')
urllib.urlretrieve (url_prefix+'bbd3a925-0258-4ea6-a16b-eb61b71bef14/demo_gsm_doc.pkl', 'demo/xml/gsm_doc.pkl')
urllib.urlretrieve (url_prefix+'1a13eec7-b409-4ec0-840a-c5f4a3095ff9/demo_gse_X.npz', 'demo/gse_X.npz')
urllib.urlretrieve (url_prefix+'a24c2571-7c82-4bc7-89ae-4dd847f92f1e/demo_gsm_X.npz', 'demo/gsm_X.npz')
urllib.urlretrieve (url_prefix+'f861dfaa-84c6-450d-8f92-611f1ae0c28f/demo_gsm_X_0.npz', 'demo/gsm_X_0.npz')
urllib.urlretrieve (url_prefix+'3980bd3f-3f65-4061-a800-72443846867e/demo_gsm_X_1.npz', 'demo/gsm_X_1.npz')
urllib.urlretrieve (url_prefix+'62779165-1ab4-444f-90d8-e36180aee1f2/demo_gsm_X_2.npz', 'demo/gsm_X_2.npz')
urllib.urlretrieve (url_prefix+'c11f9e1b-acb3-46bd-b53a-8dcfa20c9678/demo_gsm_y_0.npz', 'demo/gsm_y_0.npz')
urllib.urlretrieve (url_prefix+'db3c095c-c4fc-4b96-a176-1be9ab3c16ae/demo_gsm_y_1.npz', 'demo/gsm_y_1.npz')
urllib.urlretrieve (url_prefix+'9b047f1d-c243-4b6d-8b86-4ceac49fe9a3/demo_gsm_y_2.npz', 'demo/gsm_y_2.npz')
urllib.urlretrieve (url_prefix+'7102ec6a-f7ba-41b8-9d2f-5999b73c4c1c/demo_gsm_lb_0.npz', 'demo/gsm_lb_0.npz')
urllib.urlretrieve (url_prefix+'b0418f41-e26d-4b3e-b684-b194d43b4d78/demo_gsm_lb_1.npz', 'demo/gsm_lb_1.npz')
urllib.urlretrieve (url_prefix+'440afe45-7a47-456a-b94b-03bfece032b1/demo_gsm_lb_2.npz', 'demo/gsm_lb_2.npz')
urllib.urlretrieve (url_prefix+'34f2ad12-33b0-45d3-86d5-9db8ee53ff2f/demo_data.tar.gz', 'demo/xml/demo_data.tar.gz')
urllib.urlretrieve (url_prefix+'0b3e472f-389e-4140-a039-26d8ebe7d760/demo_gedata.tar.gz', 'gedata.tar.gz')
urllib.urlretrieve (url_prefix+'594dfa6b-7e8a-4d10-9aef-f2a21c49cff2/demo_dge.tar.gz', 'dge.tar.gz')
with tarfile.open('demo/xml/demo_data.tar.gz', 'r:gz') as tarf:
tarf.extractall('demo/xml')
with tarfile.open('gedata.tar.gz', 'r:gz') as tarf:
tarf.extractall()
with tarfile.open('dge.tar.gz', 'r:gz') as tarf:
tarf.extractall()
shutil.copy2('orig/gsm2gse.npz', 'demo/gsm2gse.npz')
io.inst_print('Finish downloading the demo data!')
gsc.DATA_PATH = 'demo'
gsc.GEO_PATH = 'demo'
opts.mltl = True
# gen_sgn(sample_dir='demo', ge_dir='gedata', dge_dir='dge')
# Signature cache
io.inst_print('Downloading the signature cache ...')
urllib.urlretrieve (url_prefix+'4ab4f2ac-34d8-4dc0-8dc1-42b2ad2fa7bd/demo_signatures.zip', 'signatures.zip')
with zipfile.ZipFile('signatures.zip', 'r') as zipf:
zipf.extractall()
io.inst_print('Finish downloading the signature cache!')
## Calculate the signature similarity network
helper.opts = opts
method = 'cd'
io.inst_print('Downloading the cache of signature similarity network ...')
urllib.urlretrieve (url_prefix+'37c6ae5f-1b0f-4447-88af-347e28d7d840/demo_simmt.tar.gz', 'simmt.tar.gz')
with tarfile.open('simmt.tar.gz', 'r:gz') as tarf:
tarf.extractall()
io.inst_print('Finish downloading the cache of signature similarity network!')
if (not os.path.exists('simmt.npz')):
for sgnf in ['disease_signature.csv', 'drug_perturbation.csv', 'gene_perturbation.csv']:
basename = os.path.splitext(os.path.basename(sgnf))[0]
cache_path = os.path.join('dge', 'cache', basename)
excel_df = pd.read_csv(sgnf)
helper._sgn2dge(excel_df, method, os.path.join('gedata', basename), os.path.join('dge', method.lower(), basename), cache_path)
helper.dge2simmt(loc='disease_signature.csv;;drug_perturbation.csv;;gene_perturbation.csv', signed=1, weighted=0, sim_method='ji', method='cd', dge_dir='dge', idx_cols='disease_name;;drug_name;;gene_symbol', output='.')
## Circos Plot
io.inst_print('Downloading the circos cache ...')
urllib.urlretrieve (url_prefix+'b81e06fa-8518-48a2-bc1a-21d864445978/demo_circos_cache.tar.gz', 'circos_cache.tar.gz')
with tarfile.open('circos_cache.tar.gz', 'r:gz') as tarf:
tarf.extractall()
io.inst_print('Finish downloading the circos cache!')
helper.plot_circos(loc='.', topk=8, topi=5, data='data.npz', simmt='simmt.npz', dizs='disease_signature.csv;;dge/limma/disease_signature', drug='drug_perturbation.csv;;dge/limma/drug_perturbation', gene='gene_perturbation.csv;;dge/limma/gene_perturbation')
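# Entry point: run hyperparameter tuning when requested, otherwise dispatch to the method
# selected by --method (or run every stage via all_entry()).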
def main():
if (opts.tune):
tuning(opts.ftype)
return
if (opts.method == 'demo'):
demo()
return
elif (opts.method == 'gse_clf'):
gse_clf()
return
elif (opts.method == 'gsm_clf'):
gsm_clf()
return
elif (opts.method == 'gsm_clt'):
gsm_clt()
return
elif (opts.method == 'gen_sgn'):
gen_sgn()
return
elif (opts.method == 'autoclf'):
autoclf(opts.ftype)
return
all_entry()
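# Example invocations (assumed usage; the script file name and data layout are
# assumptions, not taken from this file):
#   python gsx_extrc.py -m demo                        # run the self-contained demo
#   python gsx_extrc.py -m gse_clf -k 5 -l             # 5-fold CV for multilabel GSE classification
#   python gsx_extrc.py -t -e gse -r particle_swarm    # tune GSE hyperparameters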
if __name__ == '__main__':
# Parse commandline arguments
op = OptionParser()
	op.add_option('-k', '--kfold', default=10, action='store', type='int', dest='kfold', help='indicate the number of folds for k-fold cross validation')
op.add_option('-p', '--pid', default=-1, action='store', type='int', dest='pid', help='indicate the process ID')
op.add_option('-n', '--np', default=-1, action='store', type='int', dest='np', help='indicate the number of processes used for calculation')
op.add_option('-f', '--fmt', default='npz', help='data stored format: csv or npz [default: %default]')
op.add_option('-s', '--spfmt', default='csr', help='sparse data stored format: csr or csc [default: %default]')
op.add_option('-t', '--tune', action='store_true', dest='tune', default=False, help='firstly tune the hyperparameters')
op.add_option('-r', '--solver', default='particle_swarm', action='store', type='str', dest='solver', help='solver used to tune the hyperparameters: particle_swarm, grid_search, or random_search, etc.')
op.add_option('-b', '--best', action='store_true', dest='best', default=False, help='use the tuned hyperparameters')
op.add_option('-c', '--comb', action='store_true', dest='comb', default=False, help='run the combined methods')
op.add_option('-l', '--mltl', action='store_true', dest='mltl', default=False, help='use multilabel strategy')
op.add_option('-a', '--avg', default='micro', help='averaging strategy for performance metrics: micro or macro [default: %default]')
op.add_option('-e', '--ftype', default='gse', type='str', dest='ftype', help='the document type used to generate data')
op.add_option('-u', '--fuzzy', action='store_true', dest='fuzzy', default=False, help='use fuzzy clustering')
op.add_option('-j', '--thrshd', default='mean', type='str', dest='thrshd', help='threshold value')
op.add_option('-w', '--cache', default='.cache', help='the location of cache files')
op.add_option('-y', '--maxt', default=32, action='store', type='int', dest='maxt', help='indicate the maximum number of trials')
op.add_option('-d', '--dend', dest='dend', help='deep learning backend: tf or th')
op.add_option('-z', '--bsize', default=32, action='store', type='int', dest='bsize', help='indicate the batch size used in deep learning')
op.add_option('-o', '--omp', action='store_true', dest='omp', default=False, help='use openmp multi-thread')
op.add_option('-g', '--gpunum', default=1, action='store', type='int', dest='gpunum', help='indicate the gpu device number')
op.add_option('-q', '--gpuq', dest='gpuq', help='prefered gpu device queue')
op.add_option('-i', '--input', default='gsc', help='input source: gsc or geo [default: %default]')
op.add_option('-x', '--pred', action='store_true', dest='pred', default=False, help='train the model and make predictions without cross-validation')
op.add_option('-m', '--method', help='main method to run')
op.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='display detailed information')
(opts, args) = op.parse_args()
if len(args) > 0:
op.print_help()
op.error('Please input options instead of arguments.')
sys.exit(1)
# Option Correcting
if (opts.spfmt.lower() in ['', ' ', 'none']): opts.spfmt = None
# Logging setting
logging.basicConfig(level=logging.INFO if opts.verbose else logging.ERROR, format='%(asctime)s %(levelname)s %(message)s')
# Data Source
spdr = SPDR_MAP[opts.input]
# Parse config file
if (os.path.exists(CONFIG_FILE)):
cfgr = io.cfg_reader(CONFIG_FILE)
spdr_cfg = cfgr('bionlp.spider.%s' % opts.input, 'init')
if (len(spdr_cfg) > 0):
if (spdr_cfg['DATA_PATH'] is not None and os.path.exists(spdr_cfg['DATA_PATH'])):
spdr.DATA_PATH = spdr_cfg['DATA_PATH']
if (spdr_cfg['GEO_PATH'] is not None and os.path.exists(spdr_cfg['GEO_PATH'])):
spdr.GEO_PATH = spdr_cfg['GEO_PATH']
if (spdr_cfg['ONTO_PATH'] is not None and os.path.exists(spdr_cfg['ONTO_PATH'])):
spdr.ONTO_PATH = spdr_cfg['ONTO_PATH']
if (spdr_cfg['HGNC_PATH'] is not None and os.path.exists(spdr_cfg['HGNC_PATH'])):
spdr.HGNC_PATH = spdr_cfg['HGNC_PATH']
if (spdr_cfg['DNORM_PATH'] is not None and os.path.exists(spdr_cfg['DNORM_PATH'])):
spdr.DNORM_PATH = spdr_cfg['DNORM_PATH']
if (spdr_cfg['RXNAV_PATH'] is not None and os.path.exists(spdr_cfg['RXNAV_PATH'])):
spdr.RXNAV_PATH = spdr_cfg['RXNAV_PATH']
hgnc_cfg = cfgr('bionlp.spider.hgnc', 'init')
if (len(hgnc_cfg) > 0):
if (hgnc_cfg['MAX_TRIAL'] is not None and hgnc_cfg['MAX_TRIAL'] > 0):
hgnc.MAX_TRIAL = hgnc_cfg['MAX_TRIAL']
plot_cfg = cfgr('bionlp.util.plot', 'init')
plot_common = cfgr('bionlp.util.plot', 'common')
txtclf.init(plot_cfg=plot_cfg, plot_common=plot_common)
txtclt.init(plot_cfg=plot_cfg, plot_common=plot_common)
if (opts.dend is not None):
if (opts.dend == 'th' and opts.gpunum == 0 and opts.omp):
from multiprocessing import cpu_count
os.environ['OMP_NUM_THREADS'] = '4' if opts.tune else str(int(1.5 * cpu_count() / opts.np))
if (opts.gpuq is not None):
gpuq = [int(x) for x in opts.gpuq.split(',')]
dev_id = gpuq[opts.pid % len(gpuq)]
else:
dev_id = opts.pid % opts.gpunum if opts.gpunum > 0 else 0
kerasext.init(dev_id, opts.gpunum, opts.dend, opts.np, opts.omp)
annot.init()
main() | apache-2.0 |
shaneknapp/spark | python/pyspark/pandas/ml.py | 6 | 4119 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Tuple, TYPE_CHECKING, cast
import numpy as np
import pandas as pd
import pyspark
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.stat import Correlation
from pyspark.pandas.utils import column_labels_level
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
CORRELATION_OUTPUT_COLUMN = "__correlation_output__"
def corr(psdf: "ps.DataFrame", method: str = "pearson") -> pd.DataFrame:
"""
The correlation matrix of all the numerical columns of this dataframe.
Only accepts scalar numerical values for now.
:param psdf: the pandas-on-Spark dataframe.
:param method: {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
:return: :class:`pandas.DataFrame`
>>> ps.DataFrame({'A': [0, 1], 'B': [1, 0], 'C': ['x', 'y']}).corr()
A B
A 1.0 -1.0
B -1.0 1.0
"""
assert method in ("pearson", "spearman")
ndf, column_labels = to_numeric_df(psdf)
corr = Correlation.corr(ndf, CORRELATION_OUTPUT_COLUMN, method)
pcorr = cast(pd.DataFrame, corr.toPandas())
arr = pcorr.iloc[0, 0].toArray()
if column_labels_level(column_labels) > 1:
idx = pd.MultiIndex.from_tuples(column_labels)
else:
idx = pd.Index([label[0] for label in column_labels])
return pd.DataFrame(arr, columns=idx, index=idx)
def to_numeric_df(psdf: "ps.DataFrame") -> Tuple[pyspark.sql.DataFrame, List[Tuple]]:
"""
Takes a dataframe and turns it into a dataframe containing a single numerical
vector of doubles. This dataframe has a single field called '_1'.
TODO: index is not preserved currently
:param psdf: the pandas-on-Spark dataframe.
:return: a pair of dataframe, list of strings (the name of the columns
that were converted to numerical types)
>>> to_numeric_df(ps.DataFrame({'A': [0, 1], 'B': [1, 0], 'C': ['x', 'y']}))
(DataFrame[__correlation_output__: vector], [('A',), ('B',)])
"""
# TODO, it should be more robust.
accepted_types = {
np.dtype(dt)
for dt in [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, np.bool_]
}
numeric_column_labels = [
label for label in psdf._internal.column_labels if psdf[label].dtype in accepted_types
]
numeric_df = psdf._internal.spark_frame.select(
*[psdf._internal.spark_column_for(idx) for idx in numeric_column_labels]
)
va = VectorAssembler(inputCols=numeric_df.columns, outputCol=CORRELATION_OUTPUT_COLUMN)
v = va.transform(numeric_df).select(CORRELATION_OUTPUT_COLUMN)
return v, numeric_column_labels
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.ml
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.ml.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = SparkSession.builder.master("local[4]").appName("pyspark.pandas.ml tests").getOrCreate()
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.ml, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
WesleyyC/Restaurant-Revenue-Prediction | Ari/testing_grounds/feat_drop.py | 2 | 1175 | import numpy as np
import pandas as pd
###############################################################################
# Load data
df_train = pd.read_csv("train_numerical_head.csv")
df_train.head()
feats = df_train.drop(str(42), axis=1)
X_train = feats.values #features
y_train = df_train[str(42)].values #target
df_test = pd.read_csv("test_numerical_head.csv")
df_test.head()
X_test = df_test.values #features
###############################################################################
# Drop features
p_to_drop = [ 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0,
0, 0, 0, 0, 1, 1, 1,
1, 1, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0,
0, 0]
for i in range(5, 42):
    print(i)
if p_to_drop[i-5] == 0:
df_train = df_train.drop(str(i), axis=1)
df_test = df_test.drop(str(i), axis=1)
###############################################################################
# Save to File
df_train = np.asarray(df_train)
df_test = np.asarray(df_test)
np.savetxt("result_train.csv", df_train, delimiter=",")
np.savetxt("result_test.csv", df_test, delimiter=",")
#plot_r2(y, y_pred2, "Performance of GradientBoostingRegressor")
#plt.show()
#r2_score(y, y_pred2)
| mit |
jamesrobertlloyd/automl-phase-1 | util.py | 1 | 7705 | from __future__ import division
__author__ = 'James Robert Lloyd'
__description__ = 'Miscellaneous utility functions'
import os
from glob import glob
import tempfile
import string
import random
import matplotlib.pyplot as pl
import numpy as np
import scipy.io
from sklearn.cross_validation import KFold
def mkstemp_safe(directory, suffix):
"""Avoids a file handle leak present on some operating systems"""
(os_file_handle, file_name) = tempfile.mkstemp(dir=directory, suffix=suffix)
os.close(os_file_handle)
return file_name
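# Hedged usage sketch (paths are illustrative): mkstemp_safe returns only the path,
# with the low-level OS handle already closed, so the caller owns the file and should
# remove it when finished, e.g.
#   tmp_name = mkstemp_safe(tempfile.gettempdir(), '.csv')
#   # ... write results to tmp_name ...
#   os.remove(tmp_name)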
def callback_1d(model, bounds, info, x, index, ftrue):
"""
Plot the current posterior, the index, and the value of the current
recommendation.
"""
xmin, xmax = bounds[0]
xx_ = np.linspace(xmin, xmax, 500) # define grid
xx = xx_[:, None]
# ff = ftrue(xx) # compute true function
acq = index(xx) # compute acquisition
mu, s2 = model.posterior(xx) # compute posterior and
lo = mu - 2 * np.sqrt(s2) # quantiles
hi = mu + 2 * np.sqrt(s2)
# ymin, ymax = ff.min(), ff.max() # get plotting ranges
ymin, ymax = lo.min(), hi.max() # get plotting ranges FIXME - remember observed function values
ymin -= 0.2 * (ymax - ymin)
ymax += 0.2 * (ymax - ymin)
kwplot = {'lw': 2, 'alpha': 0.5} # common plotting kwargs
fig = pl.figure(1)
fig.clf()
pl.subplot(221)
# pl.plot(xx, ff, 'k:', **kwplot) # plot true function
pl.plot(xx, mu, 'b-', **kwplot) # plot the posterior and
pl.fill_between(xx_, lo, hi, color='b', alpha=0.1) # uncertainty bands
pl.scatter(info['x'], info['y'], # plot data
marker='o', facecolor='none', zorder=3)
pl.axvline(x, color='r', **kwplot) # latest selection
pl.axvline(info[-1]['xbest'], color='g', **kwplot) # current recommendation
pl.axis((xmin, xmax, ymin, ymax))
pl.ylabel('posterior')
pl.subplot(223)
pl.fill_between(xx_, acq.min(), acq, # plot acquisition
color='r', alpha=0.1)
pl.axis('tight')
pl.axvline(x, color='r', **kwplot) # plot latest selection
pl.xlabel('input')
pl.ylabel('acquisition')
pl.subplot(222)
pl.plot(ftrue(info['xbest']), 'g', **kwplot) # plot performance
pl.axis((0, len(info['xbest']), ymin, ymax))
pl.xlabel('iterations')
pl.ylabel('value of recommendation')
# for ax in fig.axes: # remove tick labels
# ax.set_xticklabels([])
# ax.set_yticklabels([])
pl.draw()
pl.show(block=False)
def mkdir(d):
if not os.path.exists(d):
os.makedirs(d)
def ls(filename):
return sorted(glob(filename))
def colorbrew(i):
"""Nice colors taken from http://colorbrewer2.org/ by David Duvenaud March 2012"""
    rgbs = [(228, 26, 28),
            (55, 126, 184),
            (77, 175, 74),
            (152, 78, 163),
            (255, 127, 0),
            (255, 255, 51),
            (166, 86, 40),
            (247, 129, 191),
            (153, 153, 153),
            (0, 0, 0)]
# Convert to [0, 1] range
rgbs = [(r / 255, g / 255, b / 255) for (r, g, b) in rgbs]
# Return color corresponding to index - wrapping round
return rgbs[i % len(rgbs)]
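# Illustrative example (uses only matplotlib, already imported above as `pl`): the
# index wraps modulo the palette size, so any non-negative integer is valid and the
# colours repeat after index 9.
#   for i in range(12):
#       pl.plot([0, 1], [i, i], color=colorbrew(i))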
def convert_mat_into_automl_folds(filename, save_folder_root, time_budget=300, n_folds=5, input_type='Numerical',
random_state=0, metric='auc_metric', usage='testing', task='binary.classification',
target_type='Binary'):
"""Convert a dataset in .mat format into several folds of automl format"""
# Load data
data = scipy.io.loadmat(filename)
X = data['X']
y = data['y']
data_name = os.path.splitext(os.path.split(filename)[-1])[0]
# Convert data if appropriate
if task == 'binary.classification':
y_max = y.max()
y[y == y_max] = 1
y[y < y_max] = 0
# If input_type is 'infer' we now infer input types
if input_type == 'infer':
raise Exception('I do not know how to infer input types yet')
else:
input_type_list = [input_type] * X.shape[1]
# Create info dictionary
# TODO - some of these defaults need to be changed
info = dict(usage=usage, name=data_name, task=task, target_type=target_type,
feat_type='Numerical', metric=metric, feat_num=X.shape[1],
target_num=1, label_num=0, has_categorical=0, has_missing=0, is_sparse=0,
time_budget=time_budget, valid_num=0)
# Now split into folds and save
folds = KFold(n=X.shape[0], n_folds=n_folds, shuffle=True, random_state=random_state)
for (fold, (train_index, test_index)) in enumerate(folds):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
fold_folder = os.path.join(save_folder_root + '_fold_%02d' % (fold + 1), data_name)
mkdir(fold_folder)
fmt = '%f'
np.savetxt(os.path.join(fold_folder, data_name + '_train.data'), X_train, fmt=fmt, delimiter=' ')
np.savetxt(os.path.join(fold_folder, data_name + '_test.data'), X_test, fmt=fmt, delimiter=' ')
if task == 'binary.classification':
fmt = '%d'
np.savetxt(os.path.join(fold_folder, data_name + '_train.solution'), y_train, fmt=fmt, delimiter=' ')
np.savetxt(os.path.join(fold_folder, data_name + '_test.solution'), y_test, fmt=fmt, delimiter=' ')
info['train_num'] = X_train.shape[0]
info['test_num'] = X_test.shape[0]
with open(os.path.join(fold_folder, data_name + '_public.info'), 'w') as info_file:
for (key, value) in info.iteritems():
info_file.write('%s = %s\n' % (key, value))
with open(os.path.join(fold_folder, data_name + '_feat.type'), 'w') as feature_file:
for feat_type in input_type_list:
feature_file.write('%s\n' % feat_type)
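# Hedged example call (the .mat path and output root below are hypothetical): with the
# defaults this creates <save_folder_root>_fold_01 ... _fold_05, each holding
# <name>_train/_test .data and .solution files plus the _public.info and _feat.type
# metadata written above. The .mat file is assumed to contain 'X' and 'y' arrays.
#   convert_mat_into_automl_folds('data/example.mat', 'automl_folds',
#                                 time_budget=300, n_folds=5)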
def convert_mat_into_automl_folds_folder(mat_folder, save_folder_root, *args, **kwargs):
"""Converts a folder"""
filenames = sorted(os.listdir(mat_folder))
for filename in filenames:
if filename.endswith('.mat'):
print('Processing ' + filename)
convert_mat_into_automl_folds(os.path.join(mat_folder, filename), save_folder_root,
*args, **kwargs)
def create_synthetic_classification_problems(mat_folder, save_folder_root, synth_kwargs_list):
pass
def VmB(pid, VmKey):
scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
'KB': 1024.0, 'MB': 1024.0*1024.0}
try:
t = open('/proc/%d/status' % pid)
v = t.read()
t.close()
except:
return -1 # non-Linux?
# get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(VmKey)
v = v[i:].split(None, 3) # whitespace
if len(v) < 3:
return 0.0 # invalid format?
# convert Vm value to bytes
return float(v[1]) * scale[v[2]]
def memory_usage(pid, since=0.0):
"""Return memory usage in bytes."""
return VmB(pid, 'VmSize:') - since
def resident_memory_usage(pid, since=0.0):
"""Return resident memory usage in bytes."""
return VmB(pid, 'VmRSS:') - since
def random_string(N=20):
return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.ascii_lowercase + string.digits)
for _ in range(N)) | mit |
yask123/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
LCAV/pyroomacoustics | pyroomacoustics/beamforming.py | 1 | 44338 | # Various Beamforming Methods
# Copyright (C) 2019 Robin Scheibler, Sidney Barthe, Ivan Dokmanic
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with this program. If
# not, see <https://opensource.org/licenses/MIT>.
from __future__ import division
import numpy as np
import scipy.linalg as la
from .parameters import constants
from . import utilities as u
from .soundsource import build_rir_matrix
from . import windows
from . import transform
# =========================================================================
# Free (non-class-member) functions related to beamformer design
# =========================================================================
def H(A, **kwargs):
"""Returns the conjugate (Hermitian) transpose of a matrix."""
return np.transpose(A, **kwargs).conj()
def sumcols(A):
"""
Sums the columns of a matrix (np.array).
The output is a 2D np.array
of dimensions M x 1.
"""
return np.sum(A, axis=1, keepdims=1)
def mdot(*args):
"""
Left-to-right associative matrix multiplication of multiple 2D ndarrays.
"""
ret = args[0]
for a in args[1:]:
ret = np.dot(ret, a)
return ret
def distance(x, y):
"""
Computes the distance matrix E.
E[i,j] = sqrt(sum((x[:,i]-y[:,j])**2)).
x and y are DxN ndarray containing N D-dimensional vectors.
"""
# Assume x, y are arrays, *not* matrices
x = np.array(x)
y = np.array(y)
# return np.sqrt((x[0,:,np.newaxis]-y[0,:])**2 +
# (x[1,:,np.newaxis]-y[1,:])**2)
return np.sqrt(np.sum((x[:, :, np.newaxis] - y[:, np.newaxis, :]) ** 2, axis=0))
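# Minimal shape check (illustrative only): for x of shape (D, N) and y of shape (D, M)
# the result is the (N, M) matrix of pairwise Euclidean distances.
#   x = np.array([[0.0, 1.0], [0.0, 0.0]])  # two 2-D points as columns
#   y = np.array([[0.0], [1.0]])            # a single 2-D point
#   distance(x, y)                          # -> [[1.0], [1.41421356...]]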
def unit_vec2D(phi):
return np.array([[np.cos(phi), np.sin(phi)]]).T
def linear_2D_array(center, M, phi, d):
"""
Creates an array of uniformly spaced linear points in 2D
Parameters
----------
center: array_like
The center of the array
M: int
The number of points
phi: float
The counterclockwise rotation of the array (from the x-axis)
d: float
The distance between neighboring points
Returns
-------
ndarray (2, M)
The array of points
"""
u = unit_vec2D(phi)
return (
np.array(center)[:, np.newaxis]
+ d * (np.arange(M)[np.newaxis, :] - (M - 1.0) / 2.0) * u
)
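# Illustrative sketch: a 4-microphone line array centred at the origin, aligned with
# the x-axis (phi=0) and spaced 5 cm apart; the formula above yields
#   linear_2D_array(center=[0, 0], M=4, phi=0, d=0.05)
#   # -> x-coordinates [-0.075, -0.025, 0.025, 0.075], y-coordinates all zero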
def circular_2D_array(center, M, phi0, radius):
"""
Creates an array of uniformly spaced circular points in 2D
Parameters
----------
center: array_like
The center of the array
M: int
The number of points
phi0: float
The counterclockwise rotation of the first element in the array (from
the x-axis)
radius: float
The radius of the array
Returns
-------
ndarray (2, M)
The array of points
"""
phi = np.arange(M) * 2.0 * np.pi / M
return np.array(center)[:, np.newaxis] + radius * np.vstack(
(np.cos(phi + phi0), np.sin(phi + phi0))
)
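# Illustrative sketch: three microphones on a 10 cm circle with the first element on
# the x-axis; the columns sit at angles phi0, phi0 + 2*pi/3 and phi0 + 4*pi/3.
#   circular_2D_array(center=[0, 0], M=3, phi0=0, radius=0.1)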
def poisson_2D_array(center, M, d):
"""
Create array of 2D positions drawn from Poisson process.
Parameters
----------
center: array_like
The center of the array
    M: int
        The number of points
    d: float
        The mean distance between neighboring points
    Returns
    -------
    ndarray (2, M)
        The array of points
"""
from numpy.random import standard_exponential, randint
R = d * standard_exponential((2, M)) * (2 * randint(0, 2, (2, M)) - 1)
R = R.cumsum(axis=1)
R -= R.mean(axis=1)[:, np.newaxis]
R += np.array([center]).T
return R
def square_2D_array(center, M, N, phi, d):
"""
Creates an array of uniformly spaced grid points in 2D
Parameters
----------
center: array_like
The center of the array
M: int
The number of points in the first dimension
    N: int
The number of points in the second dimension
phi: float
The counterclockwise rotation of the array (from the x-axis)
d: float
The distance between neighboring points
Returns
-------
ndarray (2, M * N)
The array of points
"""
c = linear_2D_array(center, M, phi + np.pi / 2.0, d)
R = np.zeros((2, M * N))
for i in np.arange(M):
R[:, i * N : (i + 1) * N] = linear_2D_array(c[:, i], N, phi, d)
return R
def spiral_2D_array(center, M, radius=1.0, divi=3, angle=None):
"""
Generate an array of points placed on a spiral
Parameters
----------
center: array_like
location of the center of the array
M: int
number of microphones
radius: float
        microphones are contained within a circle of this radius (default 1)
divi: int
number of rotations of the spiral (default 3)
angle: float
the angle offset of the spiral (default random)
Returns
-------
ndarray (2, M * N)
The array of points
"""
num_seg = int(np.ceil(M / divi))
pos_array_norm = np.linspace(0, radius, num=M, endpoint=False)
pos_array_angle = (
np.reshape(
np.tile(np.pi * 2 * np.arange(divi) / divi, num_seg), (divi, -1), order="F"
)
+ np.linspace(0, 2 * np.pi / divi, num=num_seg, endpoint=False)[np.newaxis, :]
)
pos_array_angle = np.insert(pos_array_angle.flatten("F")[: M - 1], 0, 0)
if angle is None:
pos_array_angle += np.random.rand() * np.pi / divi
else:
pos_array_angle += angle
pos_mic_x = pos_array_norm * np.cos(pos_array_angle)
pos_mic_y = pos_array_norm * np.sin(pos_array_angle)
return np.array([pos_mic_x, pos_mic_y])
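# Hedged example: 16 microphones on a 3-arm spiral of radius 0.5 m; passing `angle`
# explicitly makes the layout reproducible, otherwise a random angular offset is drawn.
#   spiral_2D_array(center=[0, 0], M=16, radius=0.5, divi=3, angle=0.0)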
def fir_approximation_ls(weights, T, n1, n2):
freqs_plus = np.array(weights.keys())[:, np.newaxis]
freqs = np.vstack([freqs_plus, -freqs_plus])
omega = 2 * np.pi * freqs
omega_discrete = omega * T
n = np.arange(n1, n2)
# Create the DTFT transform matrix corresponding to a discrete set of
# frequencies and the FIR filter indices
F = np.exp(-1j * omega_discrete * n)
w_plus = np.array(weights.values())[:, :, 0]
w = np.vstack([w_plus, w_plus.conj()])
return np.linalg.pinv(F).dot(w)
# =========================================================================
# Classes (microphone array and beamformer related)
# =========================================================================
class MicrophoneArray(object):
"""Microphone array class."""
def __init__(self, R, fs):
R = np.array(R)
self.dim = R.shape[0] # are we in 2D or in 3D
# Check the shape of the passed array
if self.dim != 2 and self.dim != 3:
dim_mismatch = True
else:
dim_mismatch = False
if R.ndim != 2 or dim_mismatch:
raise ValueError(
"The location of microphones should be described by an array_like "
"object with 2 dimensions of shape `(2 or 3, n_mics)` "
"where `n_mics` is the number of microphones. Each column contains "
"the location of a microphone."
)
self.R = R # array geometry
self.fs = fs # sampling frequency of microphones
self.signals = None
self.center = np.mean(R, axis=1, keepdims=True)
def record(self, signals, fs):
"""
This simulates the recording of the signals by the microphones.
In particular, if the microphones and the room simulation
do not use the same sampling frequency, down/up-sampling
is done here.
Parameters
----------
signals:
An ndarray with as many lines as there are microphones.
fs:
the sampling frequency of the signals.
"""
if signals.shape[0] != self.M:
raise NameError(
"The signals array should have as many lines as "
"there are microphones."
)
if signals.ndim != 2:
raise NameError("The signals should be a 2D array.")
if fs != self.fs:
try:
import samplerate
fs_ratio = self.fs / float(fs)
newL = int(fs_ratio * signals.shape[1]) - 1
self.signals = np.zeros((self.M, newL))
# samplerate resample function considers columns as channels
# (hence the transpose)
for m in range(self.M):
self.signals[m] = samplerate.resample(
signals[m], fs_ratio, "sinc_best"
)
except ImportError:
raise ImportError(
"The samplerate package must be installed for"
" resampling of the signals."
)
else:
self.signals = signals
    def to_wav(self, filename, mono=False, norm=False, bitdepth=float):
"""
Save all the signals to wav files.
Parameters
----------
filename: str
the name of the file
mono: bool, optional
if true, records only the center channel floor(M / 2) (default
`False`)
norm: bool, optional
if true, normalize the signal to fit in the dynamic range (default
`False`)
bitdepth: int, optional
            the format of output samples [np.int8/16/32/64 or float
(default)]
"""
from scipy.io import wavfile
if mono is True:
signal = self.signals[self.M // 2]
else:
signal = self.signals.T # each column is a channel
        float_types = [float, np.float32, np.float64]
if bitdepth in float_types:
bits = None
elif bitdepth is np.int8:
bits = 8
elif bitdepth is np.int16:
bits = 16
elif bitdepth is np.int32:
bits = 32
elif bitdepth is np.int64:
bits = 64
else:
raise NameError("No such type.")
if norm:
from .utilities import normalize
signal = normalize(signal, bits=bits)
signal = np.array(signal, dtype=bitdepth)
wavfile.write(filename, self.fs, signal)
def append(self, locs):
"""
Add some microphones to the array
Parameters
----------
locs: numpy.ndarray (2 or 3, n_mics)
Adds `n_mics` microphones to the array. The coordinates are passed as
a `numpy.ndarray` with each column containing the coordinates of a
microphone.
"""
if isinstance(locs, MicrophoneArray):
self.R = np.concatenate((self.R, locs.R), axis=1)
else:
self.R = np.concatenate((self.R, locs), axis=1)
# in case there was already some signal recorded, just pad with zeros
if self.signals is not None:
self.signals = np.concatenate(
(
self.signals,
np.zeros(
(locs.shape[1], self.signals.shape[1]), dtype=self.signals.dtype
),
),
axis=0,
)
def __len__(self):
return self.R.shape[1]
@property
def M(self):
return self.__len__()
class Beamformer(MicrophoneArray):
"""
At some point, in some nice way, the design methods
should also go here. Probably with generic arguments.
Parameters
----------
R: numpy.ndarray
Mics positions
fs: int
Sampling frequency
N: int, optional
Length of FFT, i.e. number of FD beamforming weights, equally spaced.
Defaults to 1024.
Lg: int, optional
Length of time-domain filters. Default to N.
hop: int, optional
Hop length for frequency domain processing. Default to N/2.
zpf: int, optional
Front zero padding length for frequency domain processing. Default is 0.
zpb: int, optional
Zero padding length for frequency domain processing. Default is 0.
"""
def __init__(self, R, fs, N=1024, Lg=None, hop=None, zpf=0, zpb=0):
MicrophoneArray.__init__(self, R, fs)
# only support even length (in freq)
if N % 2 == 1:
N += 1
self.N = int(N) # FFT length
if Lg is None:
self.Lg = N # TD filters length
else:
self.Lg = int(Lg)
# setup lengths for FD processing
self.zpf = int(zpf)
self.zpb = int(zpb)
self.L = self.N - self.zpf - self.zpb
if hop is None:
self.hop = self.L // 2
else:
self.hop = hop
# for now only support equally spaced frequencies
self.frequencies = np.arange(0, self.N // 2 + 1) / self.N * float(self.fs)
# weights will be computed later, the array is of shape (M, N/2+1)
self.weights = None
# the TD beamforming filters (M, Lg)
self.filters = None
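    # Hedged construction sketch (geometry and sampling rate are arbitrary choices):
    #   R = circular_2D_array(center=[2.0, 2.0], M=6, phi0=0, radius=0.05)
    #   bf = Beamformer(R, fs=16000, N=512, Lg=512)
    #   bf.far_field_weights(np.pi / 4)  # steer a far-field beam towards 45 degrees
    #   bf.filters_from_weights()        # derive the equivalent time-domain filters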
def __add__(self, y):
""" Concatenates two beamformers together."""
newR = np.concatenate((self.R, y.R), axis=1)
        return Beamformer(
            newR, self.fs, N=self.N, Lg=self.Lg, hop=self.hop, zpf=self.zpf, zpb=self.zpb
        )
def filters_from_weights(self, non_causal=0.0):
"""
Compute time-domain filters from frequency domain weights.
Parameters
----------
non_causal: float, optional
ratio of filter coefficients used for non-causal part
"""
if self.weights is None:
raise NameError("Weights must be defined.")
self.filters = np.zeros((self.M, self.Lg))
if self.N <= self.Lg:
# go back to time domain and shift DC to center
tw = np.fft.irfft(np.conj(self.weights), axis=1, n=self.N)
self.filters[:, : self.N] = np.concatenate(
(tw[:, -self.N // 2 :], tw[:, : self.N // 2]), axis=1
)
elif self.N > self.Lg:
# Least-square projection
for i in np.arange(self.M):
Lgp = np.floor((1 - non_causal) * self.Lg)
Lgm = self.Lg - Lgp
# the beamforming weights in frequency are the complex
# conjugates of the FT of the filter
w = np.concatenate((np.conj(self.weights[i]), self.weights[i, -2:0:-1]))
# create partial Fourier matrix
k = np.arange(self.N)[:, np.newaxis]
l = np.concatenate((np.arange(self.N - Lgm, self.N), np.arange(Lgp)))
F = np.exp(-2j * np.pi * k * l / self.N)
self.filters[i] = np.real(np.linalg.lstsq(F, w, rcond=None)[0])
def weights_from_filters(self):
if self.filters is None:
raise NameError("Filters must be defined.")
# this is what we want to use, really.
# self.weights = np.conj(np.fft.rfft(self.filters, n=self.N, axis=1))
# quick hack to be able to use MKL acceleration package from anaconda
self.weights = np.zeros((self.M, self.N // 2 + 1), dtype=np.complex128)
for m in range(self.M):
self.weights[m] = np.conj(np.fft.rfft(self.filters[m], n=self.N))
def steering_vector_2D(self, frequency, phi, dist, attn=False):
phi = np.array([phi]).reshape(phi.size)
# Assume phi and dist are measured from the array's center
X = dist * np.array([np.cos(phi), np.sin(phi)]) + self.center
D = distance(self.R, X)
omega = 2 * np.pi * frequency
if attn:
# TO DO 1: This will mean slightly different absolute value for
# every entry, even within the same steering vector. Perhaps a
# better paradigm is far-field with phase carrier.
return 1.0 / (4 * np.pi) / D * np.exp(-1j * omega * D / constants.get("c"))
else:
return np.exp(-1j * omega * D / constants.get("c"))
def steering_vector_2D_from_point(self, frequency, source, attn=True, ff=False):
""" Creates a steering vector for a particular frequency and source
Args:
frequency
source: location in cartesian coordinates
attn: include attenuation factor if True
ff: uses far-field distance if true
Return:
A 2x1 ndarray containing the steering vector.
"""
X = np.array(source)
if X.ndim == 1:
X = source[:, np.newaxis]
omega = 2 * np.pi * frequency
# normalize for far-field if requested
if ff:
# unit vectors pointing towards sources
p = X - self.center
p /= np.linalg.norm(p)
# The projected microphone distances on the unit vectors
D = -1 * np.dot(self.R.T, p)
# subtract minimum in each column
D -= np.min(D)
else:
D = distance(self.R, X)
phase = np.exp(-1j * omega * D / constants.get("c"))
if attn:
# TO DO 1: This will mean slightly different absolute value for
# every entry, even within the same steering vector. Perhaps a
# better paradigm is far-field with phase carrier.
return 1.0 / (4 * np.pi) / D * phase
else:
return phase
def response(self, phi_list, frequency):
i_freq = np.argmin(np.abs(self.frequencies - frequency))
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError(
"Beamforming weights or filters need to be computed" " first."
)
# For the moment assume that we are in 2D
bfresp = np.dot(
H(self.weights[:, i_freq]),
self.steering_vector_2D(
self.frequencies[i_freq], phi_list, constants.get("ffdist")
),
)
return self.frequencies[i_freq], bfresp
def response_from_point(self, x, frequency):
i_freq = np.argmin(np.abs(self.frequencies - frequency))
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError(
"Beamforming weights or filters need to be computed" " first."
)
# For the moment assume that we are in 2D
bfresp = np.dot(
H(self.weights[:, i_freq]),
self.steering_vector_2D_from_point(
self.frequencies[i_freq], x, attn=True, ff=False
),
)
return self.frequencies[i_freq], bfresp
def plot_response_from_point(self, x, legend=None):
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError(
"Beamforming weights or filters need to be computed" " first."
)
if x.ndim == 0:
x = np.array([x])
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn("Matplotlib is required for plotting")
return
HF = np.zeros((x.shape[1], self.frequencies.shape[0]), dtype=complex)
for k, p in enumerate(x.T):
for i, f in enumerate(self.frequencies):
r = np.dot(
H(self.weights[:, i]),
self.steering_vector_2D_from_point(f, p, attn=True, ff=False),
)
HF[k, i] = r[0]
plt.subplot(2, 1, 1)
plt.title("Beamformer response")
for hf in HF:
plt.plot(self.frequencies, np.abs(hf))
plt.ylabel("Modulus")
plt.axis("tight")
plt.legend(legend)
plt.subplot(2, 1, 2)
for hf in HF:
plt.plot(self.frequencies, np.unwrap(np.angle(hf)))
plt.ylabel("Phase")
plt.xlabel("Frequency [Hz]")
plt.axis("tight")
plt.legend(legend)
def plot_beam_response(self):
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError(
"Beamforming weights or filters need to be computed" " first."
)
phi = np.linspace(-np.pi, np.pi - np.pi / 180, 360)
freq = self.frequencies
resp = np.zeros((freq.shape[0], phi.shape[0]), dtype=complex)
for i, f in enumerate(freq):
# For the moment assume that we are in 2D
resp[i, :] = np.dot(
H(self.weights[:, i]),
self.steering_vector_2D(f, phi, constants.get("ffdist")),
)
H_abs = np.abs(resp) ** 2
H_abs /= H_abs.max()
H_abs = 10 * np.log10(H_abs + 1e-10)
p_min = 0
p_max = 100
vmin, vmax = np.percentile(H_abs.flatten(), [p_min, p_max])
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn("Matplotlib is required for plotting")
return
plt.imshow(
H_abs,
aspect="auto",
origin="lower",
interpolation="sinc",
vmax=vmax,
vmin=vmin,
)
plt.xlabel("Angle [rad]")
xticks = [-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi]
for i, p in enumerate(xticks):
xticks[i] = np.argmin(np.abs(p - phi))
xticklabels = ["$-\pi$", "$-\pi/2$", "0", "$\pi/2$", "$\pi$"]
plt.setp(plt.gca(), "xticks", xticks)
plt.setp(plt.gca(), "xticklabels", xticklabels)
plt.ylabel("Freq [kHz]")
yticks = np.zeros(4)
f_0 = np.floor(self.fs / 8000.0)
for i in np.arange(1, 5):
yticks[i - 1] = np.argmin(np.abs(freq - 1000.0 * i * f_0))
# yticks = np.array(plt.getp(plt.gca(), 'yticks'), dtype=np.int)
plt.setp(plt.gca(), "yticks", yticks)
plt.setp(plt.gca(), "yticklabels", np.arange(1, 5) * f_0)
def snr(self, source, interferer, f, R_n=None, dB=False):
i_f = np.argmin(np.abs(self.frequencies - f))
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError(
"Beamforming weights or filters need to be computed" " first."
)
# This works at a single frequency because otherwise we need to pass
# many many covariance matrices. Easy to change though (you can also
# have frequency independent R_n).
if R_n is None:
R_n = np.zeros((self.M, self.M))
# To compute the SNR, we /must/ use the real steering vectors, so no
# far field, and attn=True
A_good = self.steering_vector_2D_from_point(
self.frequencies[i_f], source.images, attn=True, ff=False
)
if interferer is not None:
A_bad = self.steering_vector_2D_from_point(
self.frequencies[i_f], interferer.images, attn=True, ff=False
)
R_nq = R_n + sumcols(A_bad) * H(sumcols(A_bad))
else:
R_nq = R_n
w = self.weights[:, i_f]
a_1 = sumcols(A_good)
SNR = np.real(mdot(H(w), a_1, H(a_1), w) / mdot(H(w), R_nq, w))
if dB is True:
SNR = 10 * np.log10(SNR)
return SNR
def udr(self, source, interferer, f, R_n=None, dB=False):
i_f = np.argmin(np.abs(self.frequencies - f))
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError(
"Beamforming weights or filters need to be computed" " first."
)
if R_n is None:
R_n = np.zeros((self.M, self.M))
A_good = self.steering_vector_2D_from_point(
self.frequencies[i_f], source.images, attn=True, ff=False
)
if interferer is not None:
A_bad = self.steering_vector_2D_from_point(
self.frequencies[i_f], interferer.images, attn=True, ff=False
)
R_nq = R_n + sumcols(A_bad).dot(H(sumcols(A_bad)))
else:
R_nq = R_n
w = self.weights[:, i_f]
UDR = np.real(mdot(H(w), A_good, H(A_good), w) / mdot(H(w), R_nq, w))
if dB is True:
UDR = 10 * np.log10(UDR)
return UDR
def process(self, FD=False):
if self.signals is None or len(self.signals) == 0:
raise NameError("No signal to beamform.")
if FD is True:
# STFT processing
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError(
"Beamforming weights or filters need to be " "computed first."
)
# create window functions
analysis_win = windows.hann(self.L)
# perform STFT
sig_stft = transform.analysis(
self.signals.T,
L=self.L,
hop=self.hop,
win=analysis_win,
zp_back=self.zpb,
zp_front=self.zpf,
)
# beamform
sig_stft_bf = np.sum(sig_stft * self.weights.conj().T, axis=2)
# back to time domain
output = transform.synthesis(
sig_stft_bf, L=self.L, hop=self.hop, zp_back=self.zpb, zp_front=self.zpf
)
# remove the zero padding from output signal
if self.zpb == 0:
output = output[self.zpf :]
else:
output = output[self.zpf : -self.zpb]
else:
# TD processing
if self.weights is not None and self.filters is None:
self.filters_from_weights()
elif self.weights is None and self.filters is None:
raise NameError(
"Beamforming weights or filters need to be " "computed first."
)
from scipy.signal import fftconvolve
            # time-domain filtering: FFT-based convolution of each channel, then sum
output = fftconvolve(self.filters[0], self.signals[0])
for i in range(1, len(self.signals)):
output += fftconvolve(self.filters[i], self.signals[i])
return output
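    # Hedged usage sketch: once `self.signals` has been populated (e.g. via `record`
    # or a room simulation), frequency-domain processing uses the weights while
    # time-domain processing uses the filters.
    #   out_fd = bf.process(FD=True)   # STFT-domain beamforming with self.weights
    #   out_td = bf.process(FD=False)  # FIR filtering with self.filters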
def plot(self, sum_ir=False, FD=True):
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is not None and self.filters is None:
self.filters_from_weights()
elif self.weights is None and self.filters is None:
raise NameError(
"Beamforming weights or filters need to be " "computed first."
)
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn("Matplotlib is required for plotting")
return
if FD is True:
plt.subplot(2, 2, 1)
plt.plot(self.frequencies, np.abs(self.weights.T))
plt.title("Beamforming weights [modulus]")
plt.xlabel("Frequency [Hz]")
plt.ylabel("Weight modulus")
plt.subplot(2, 2, 2)
plt.plot(self.frequencies, np.unwrap(np.angle(self.weights.T), axis=0))
plt.title("Beamforming weights [phase]")
plt.xlabel("Frequency [Hz]")
plt.ylabel("Unwrapped phase")
plt.subplot(2, 1, 2)
plt.plot(np.arange(self.Lg) / float(self.fs), self.filters.T)
plt.title("Beamforming filters")
plt.xlabel("Time [s]")
plt.ylabel("Filter amplitude")
plt.axis("tight")
plt.tight_layout(pad=0.1)
def far_field_weights(self, phi):
"""
This method computes weight for a far field at infinity
phi: direction of beam
"""
u = unit_vec2D(phi)
proj = np.dot(u.T, self.R - self.center)[0]
# normalize the first arriving signal to ensure a causal filter
proj -= proj.max()
self.weights = np.exp(
2j * np.pi * self.frequencies[:, np.newaxis] * proj / constants.get("c")
).T
def rake_delay_and_sum_weights(
self, source, interferer=None, R_n=None, attn=True, ff=False
):
self.weights = np.zeros((self.M, self.frequencies.shape[0]), dtype=complex)
K = source.images.shape[1] - 1
for i, f in enumerate(self.frequencies):
W = self.steering_vector_2D_from_point(f, source.images, attn=attn, ff=ff)
self.weights[:, i] = 1.0 / self.M / (K + 1) * np.sum(W, axis=1)
def rake_one_forcing_weights(
self, source, interferer=None, R_n=None, ff=False, attn=True
):
if R_n is None:
R_n = np.zeros((self.M, self.M))
self.weights = np.zeros((self.M, self.frequencies.shape[0]), dtype=complex)
for i, f in enumerate(self.frequencies):
if interferer is None:
A_bad = np.array([[]])
else:
A_bad = self.steering_vector_2D_from_point(
f, interferer.images, attn=attn, ff=ff
)
R_nq = R_n + sumcols(A_bad).dot(H(sumcols(A_bad)))
A_s = self.steering_vector_2D_from_point(f, source.images, attn=attn, ff=ff)
R_nq_inv = np.linalg.pinv(R_nq)
D = np.linalg.pinv(mdot(H(A_s), R_nq_inv, A_s))
self.weights[:, i] = sumcols(mdot(R_nq_inv, A_s, D))[:, 0]
def rake_max_sinr_weights(
self, source, interferer=None, R_n=None, rcond=0.0, ff=False, attn=True
):
"""
This method computes a beamformer focusing on a number of specific
sources and ignoring a number of interferers.
INPUTS
* source : source locations
* interferer : interferer locations
"""
if R_n is None:
R_n = np.zeros((self.M, self.M))
self.weights = np.zeros((self.M, self.frequencies.shape[0]), dtype=complex)
for i, f in enumerate(self.frequencies):
A_good = self.steering_vector_2D_from_point(
f, source.images, attn=attn, ff=ff
)
if interferer is None:
A_bad = np.array([[]])
else:
A_bad = self.steering_vector_2D_from_point(
f, interferer.images, attn=attn, ff=ff
)
a_good = sumcols(A_good)
a_bad = sumcols(A_bad)
# TO DO: Fix this (check for numerical rank, use the low rank
# approximation)
K_inv = np.linalg.pinv(
a_bad.dot(H(a_bad)) + R_n + rcond * np.eye(A_bad.shape[0])
)
self.weights[:, i] = (K_inv.dot(a_good) / mdot(H(a_good), K_inv, a_good))[
:, 0
]
def rake_max_udr_weights(
self, source, interferer=None, R_n=None, ff=False, attn=True
):
if source.images.shape[1] == 1:
self.rake_max_sinr_weights(
                source, interferer, R_n=R_n, ff=ff, attn=attn
)
return
if R_n is None:
R_n = np.zeros((self.M, self.M))
self.weights = np.zeros((self.M, self.frequencies.shape[0]), dtype=complex)
for i, f in enumerate(self.frequencies):
A_good = self.steering_vector_2D_from_point(
f, source.images, attn=attn, ff=ff
)
if interferer is None:
A_bad = np.array([[]])
else:
A_bad = self.steering_vector_2D_from_point(
f, interferer.images, attn=attn, ff=ff
)
R_nq = R_n + sumcols(A_bad).dot(H(sumcols(A_bad)))
C = np.linalg.cholesky(R_nq)
l, v = np.linalg.eig(
mdot(np.linalg.inv(C), A_good, H(A_good), H(np.linalg.inv(C)))
)
self.weights[:, i] = np.linalg.inv(H(C)).dot(v[:, 0])
def rake_max_udr_filters(
self, source, interferer=None, R_n=None, delay=0.03, epsilon=5e-3
):
"""
Compute directly the time-domain filters maximizing the
Useful-to-Detrimental Ratio (UDR).
This beamformer is not practical. It maximizes the UDR ratio in the time
domain directly without imposing flat response towards the source of
interest. This results in severe distortion of the desired signal.
Parameters
----------
source: pyroomacoustics.SoundSource
the desired source
interferer: pyroomacoustics.SoundSource, optional
the interfering source
R_n: ndarray, optional
the noise covariance matrix, it should be (M * Lg)x(M * Lg) where M
is the number of sensors and Lg the filter length
delay: float, optional
the signal delay introduced by the beamformer (default 0.03 s)
        epsilon: float, optional
            tolerance used when building the room impulse response matrix (default 5e-3)
"""
if delay > self.Lg / self.fs:
print("Warning: filter length shorter than beamformer delay")
if R_n is None:
R_n = np.zeros((self.M * self.Lg, self.M * self.Lg))
if interferer is not None:
H = build_rir_matrix(
self.R,
(source, interferer),
self.Lg,
self.fs,
epsilon=epsilon,
unit_damping=True,
)
L = H.shape[1] // 2
else:
H = build_rir_matrix(
self.R, (source,), self.Lg, self.fs, epsilon=epsilon, unit_damping=True
)
L = H.shape[1]
# Delay of the system in samples
kappa = int(delay * self.fs)
precedence = int(0.030 * self.fs)
# the constraint
n = int(np.minimum(L, kappa + precedence))
Hnc = H[:, :kappa]
Hpr = H[:, kappa:n]
A = np.dot(Hpr, Hpr.T)
B = np.dot(Hnc, Hnc.T) + np.dot(H[:, L:], H[:, L:].T) + R_n
if interferer is not None:
Hc = H[:, n:L]
B += np.dot(Hc, Hc.T)
# solve the problem
SINR, v = la.eigh(
A,
b=B,
eigvals=(self.M * self.Lg - 1, self.M * self.Lg - 1),
overwrite_a=True,
overwrite_b=True,
check_finite=False,
)
g_val = np.real(v[:, 0])
# reshape and store
self.filters = g_val.reshape((self.M, self.Lg))
# compute and return SNR
return SINR[0]
def rake_perceptual_filters(
self, source, interferer=None, R_n=None, delay=0.03, d_relax=0.035, epsilon=5e-3
):
"""
Compute directly the time-domain filters for a perceptually motivated
beamformer. The beamformer minimizes noise and interference, but relaxes
the response of the filter within the 30 ms following the delay.
"""
if delay > self.Lg / self.fs:
print("Warning: filter length shorter than beamformer delay")
if R_n is None:
R_n = np.zeros((self.M * self.Lg, self.M * self.Lg))
# build the channel matrix
if interferer is not None:
H = build_rir_matrix(
self.R,
(source, interferer),
self.Lg,
self.fs,
epsilon=epsilon,
unit_damping=True,
)
L = H.shape[1] // 2
else:
H = build_rir_matrix(
self.R, (source,), self.Lg, self.fs, epsilon=epsilon, unit_damping=True
)
L = H.shape[1]
# Delay of the system in samples
tau = int(delay * self.fs)
kappa = int(d_relax * self.fs)
# the constraint
A = np.concatenate((H[:, : tau + 1], H[:, tau + kappa :]), axis=1)
b = np.zeros((A.shape[1], 1))
b[tau, 0] = 1
# We first assume the sample are uncorrelated
K_nq = R_n
if interferer is not None:
K_nq += np.dot(H[:, L:], H[:, L:].T)
# causal response construction
C = la.cho_factor(K_nq, overwrite_a=True, check_finite=False)
B = la.cho_solve(C, A)
D = np.dot(A.T, B)
C = la.cho_factor(D, overwrite_a=True, check_finite=False)
x = la.cho_solve(C, b)
g_val = np.dot(B, x)
# reshape and store
self.filters = g_val.reshape((self.M, self.Lg))
# compute and return SNR
A = np.dot(g_val.T, H[:, :L])
num = np.dot(A, A.T)
denom = np.dot(np.dot(g_val.T, K_nq), g_val)
return num / denom
def rake_max_sinr_filters(self, source, interferer, R_n, epsilon=5e-3, delay=0.0):
"""
Compute the time-domain filters of SINR maximizing beamformer.
"""
H = build_rir_matrix(
self.R,
(source, interferer),
self.Lg,
self.fs,
epsilon=epsilon,
unit_damping=True,
)
        L = H.shape[1] // 2
# We first assume the sample are uncorrelated
K_s = np.dot(H[:, :L], H[:, :L].T)
K_nq = np.dot(H[:, L:], H[:, L:].T) + R_n
# Compute TD filters using generalized Rayleigh coefficient maximization
SINR, v = la.eigh(
K_s,
b=K_nq,
eigvals=(self.M * self.Lg - 1, self.M * self.Lg - 1),
overwrite_a=True,
overwrite_b=True,
check_finite=False,
)
g_val = np.real(v[:, 0])
self.filters = g_val.reshape((self.M, self.Lg))
# compute and return SNR
return SINR[0]
def rake_distortionless_filters(
self, source, interferer, R_n, delay=0.03, epsilon=5e-3
):
"""
Compute time-domain filters of a beamformer minimizing noise and
interference while forcing a distortionless response towards the source.
"""
H = build_rir_matrix(
self.R,
(source, interferer),
self.Lg,
self.fs,
epsilon=epsilon,
unit_damping=True,
)
        L = H.shape[1] // 2
# We first assume the sample are uncorrelated
K_nq = np.dot(H[:, L:], H[:, L:].T) + R_n
# constraint
kappa = int(delay * self.fs)
A = H[:, :L]
b = np.zeros((L, 1))
b[kappa, 0] = 1
# filter computation
C = la.cho_factor(K_nq, overwrite_a=True, check_finite=False)
B = la.cho_solve(C, A)
D = np.dot(A.T, B)
C = la.cho_factor(D, overwrite_a=True, check_finite=False)
x = la.cho_solve(C, b)
g_val = np.dot(B, x)
# reshape and store
self.filters = g_val.reshape((self.M, self.Lg))
# compute and return SNR
A = np.dot(g_val.T, H[:, :L])
num = np.dot(A, A.T)
denom = np.dot(np.dot(g_val.T, K_nq), g_val)
return num / denom
def rake_mvdr_filters(self, source, interferer, R_n, delay=0.03, epsilon=5e-3):
"""
Compute the time-domain filters of the minimum variance distortionless
response beamformer.
"""
H = build_rir_matrix(
self.R,
(source, interferer),
self.Lg,
self.fs,
epsilon=epsilon,
unit_damping=True,
)
L = H.shape[1] // 2
# the constraint vector
kappa = int(delay * self.fs)
h = H[:, kappa]
# We first assume the sample are uncorrelated
R_xx = np.dot(H[:, :L], H[:, :L].T)
K_nq = np.dot(H[:, L:], H[:, L:].T) + R_n
# Compute the TD filters
C = la.cho_factor(R_xx + K_nq, check_finite=False)
g_val = la.cho_solve(C, h)
g_val /= np.inner(h, g_val)
self.filters = g_val.reshape((self.M, self.Lg))
# compute and return SNR
num = np.inner(g_val.T, np.dot(R_xx, g_val))
denom = np.inner(np.dot(g_val.T, K_nq), g_val)
return num / denom
def rake_one_forcing_filters(self, sources, interferers, R_n, epsilon=5e-3):
"""
Compute the time-domain filters of a beamformer with unit response
towards multiple sources.
"""
dist_mat = distance(self.R, sources.images)
s_time = dist_mat / constants.get("c")
s_dmp = 1.0 / (4 * np.pi * dist_mat)
dist_mat = distance(self.R, interferers.images)
i_time = dist_mat / constants.get("c")
i_dmp = 1.0 / (4 * np.pi * dist_mat)
# compute offset needed for decay of sinc by epsilon
offset = np.maximum(s_dmp.max(), i_dmp.max()) / (np.pi * self.fs * epsilon)
t_min = np.minimum(s_time.min(), i_time.min())
t_max = np.maximum(s_time.max(), i_time.max())
# adjust timing
s_time -= t_min - offset
i_time -= t_min - offset
        Lh = int(np.ceil((t_max - t_min + 2 * offset) * float(self.fs)))
# the channel matrix
K = sources.images.shape[1]
Lg = self.Lg
        off = (Lg - Lh) // 2
L = self.Lg + Lh - 1
H = np.zeros((Lg * self.M, 2 * L))
As = np.zeros((Lg * self.M, K))
for r in np.arange(self.M):
# build constraint matrix
hs = u.low_pass_dirac(
s_time[r, :, np.newaxis], s_dmp[r, :, np.newaxis], self.fs, Lh
)[:, ::-1]
As[r * Lg + off : r * Lg + Lh + off, :] = hs.T
            # build the source RIR matrix
hx = u.low_pass_dirac(
s_time[r, :, np.newaxis], s_dmp[r, :, np.newaxis], self.fs, Lh
).sum(axis=0)
H[r * Lg : (r + 1) * Lg, :L] = u.convmtx(hx, Lg).T
# build interferer RIR matrix
hq = u.low_pass_dirac(
i_time[r, :, np.newaxis], i_dmp[r, :, np.newaxis], self.fs, Lh
).sum(axis=0)
H[r * Lg : (r + 1) * Lg, L:] = u.convmtx(hq, Lg).T
ones = np.ones((K, 1))
# We first assume the sample are uncorrelated
K_x = np.dot(H[:, :L], H[:, :L].T)
K_nq = np.dot(H[:, L:], H[:, L:].T) + R_n
# Compute the TD filters
K_nq_inv = np.linalg.inv(K_x + K_nq)
C = np.dot(K_nq_inv, As)
B = np.linalg.inv(np.dot(As.T, C))
g_val = np.dot(C, np.dot(B, ones))
self.filters = g_val.reshape((self.M, Lg))
# compute and return SNR
A = np.dot(g_val.T, H[:, :L])
num = np.dot(A, A.T)
denom = np.dot(np.dot(g_val.T, K_nq), g_val)
return num / denom
| mit |
nodchip/tanuki- | script/analyze_learning_log.py | 1 | 3669 | # -*- coding: cp932 -*-
import os
import sys
import re
import glob
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf
import pandas as pd
def analyze_log(file_path):
with open(file_path, 'rb') as fi:
sfens_pat = re.compile(r'^(?P<sfens>\d+) sfens ,')
# record_pat = re.compile(r'^hirate eval = (?P<hirate_eval>.*) , dsig rmse = (?P<dsig_rmse>.*) , dsig mae = (?P<dsig_mae>.*) , eval mae = (?P<eval_mae>.*) , test_cross_entropy_eval = (?P<tcee>.*) , test_cross_entropy_win = (?P<tcew>.*) , test_cross_entropy = (?P<tce>.*) , learn_cross_entropy_eval = (?P<lcee>.*) , learn_cross_entropy_win = (?P<lcew>.*) , learn_cross_entropy = (?P<lce>.*)')
record_pat = re.compile(r'^hirate eval = (?P<hirate_eval>.*) , test_cross_entropy_eval = (?P<tcee>.*) , test_cross_entropy_win = (?P<tcew>.*) , test_cross_entropy = (?P<tce>.*) , learn_cross_entropy_eval = (?P<lcee>.*) , learn_cross_entropy_win = (?P<lcew>.*) , learn_cross_entropy = (?P<lce>.*) , norm = (?P<norm>.*) , move accuracy = (?P<move_acc>.*)%')
epoch_pat = re.compile(r'^epoch.*')
log = []
for line in fi.readlines():
mo = sfens_pat.search(line)
if mo:
sfens = int(mo.groupdict()['sfens'])
continue
mo = epoch_pat.search(line)
if mo:
continue
mo = record_pat.search(line)
if mo:
# sfens += 1000000 # output every 1M sfens.
if sfens < 2000000: # skip early period
continue;
hirate_eval = float(mo.groupdict()['hirate_eval'])
# dsig_rmse = float(mo.groupdict()['dsig_rmse'])
# dsig_mae = float(mo.groupdict()['dsig_mae'])
# eval_mae = float(mo.groupdict()['eval_mae'])
tce = float(mo.groupdict()['tce'])
lce = float(mo.groupdict()['lce'])
norm = float(mo.groupdict()['norm'])
move_acc = float(mo.groupdict()['move_acc'])
# log.append((sfens, hirate_eval, dsig_rmse , dsig_mae , eval_mae , tce , lce))
log.append((sfens, hirate_eval, tce , lce , norm , move_acc))
if len(log) == 0:
print('{}: Empty'.format(file_path))
return None
else:
print('{}: {}'.format(file_path, len(log)))
# dataframe
# df = pd.DataFrame(data=log, columns='sfens hirate_eval dsig_rmse dsig_mae eval_mae tce lce'.split())
df = pd.DataFrame(data=log, columns='sfens hirate_eval tce lce norm move_acc'.split())
# plot
fig, ax = plt.subplots(1, 1)
ax.plot(
df['sfens'],
df['tce'],
color='red', label='tce')
ax.set_xlabel('# SFENs')
ax.legend(loc='upper left').get_frame().set_alpha(0.5)
ax.plot(
df['sfens'],
df['lce'],
# df['move_acc'],
color='green', label='lce')
ax.legend(loc='upper right').get_frame().set_alpha(0.5)
# ax.plot(
# df['sfens'],
# df['norm'],
# color='black', label='norm')
ax.set_title(file_path)
return fig
if __name__ == '__main__':
with matplotlib.backends.backend_pdf.PdfPages('yane.pdf') as pdf:
for file_path in sorted(glob.glob(os.path.join(sys.argv[1], '*', 'log'))):
fig = analyze_log(file_path)
if fig is not None:
pdf.savefig(fig)
d = pdf.infodict()
d['Title'] = u'Yanelog analysis of [{}]'.format(sys.argv[1])
d['CreationDate'] = datetime.datetime.now()
plt.show()
| gpl-3.0 |
phdowling/scikit-learn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
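# A hedged companion sketch to the example above: instead of plotting the whole path,
# it prints the L2 norm of the ridge coefficients for a few alphas on the same
# Hilbert-matrix problem, showing the shrinkage numerically. Illustrative only, not
# part of the scikit-learn example itself.
import numpy as np
from sklearn import linear_model
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
for alpha in (1e-10, 1e-6, 1e-2):
    clf = linear_model.Ridge(alpha=alpha, fit_intercept=False)
    clf.fit(X, y)
    # larger alpha -> smaller coefficient norm (stronger shrinkage)
    print(alpha, np.linalg.norm(clf.coef_))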
korbonits/data-science-from-scratch | code/visualizing_data.py | 58 | 5116 | import matplotlib.pyplot as plt
from collections import Counter
def make_chart_simple_line_chart(plt):
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]
# create a line chart, years on x-axis, gdp on y-axis
plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
# add a title
plt.title("Nominal GDP")
# add a label to the y-axis
plt.ylabel("Billions of $")
plt.show()
def make_chart_simple_bar_chart(plt):
movies = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]
# bars are by default width 0.8, so we'll add 0.1 to the left coordinates
# so that each bar is centered
xs = [i + 0.1 for i, _ in enumerate(movies)]
# plot bars with left x-coordinates [xs], heights [num_oscars]
plt.bar(xs, num_oscars)
plt.ylabel("# of Academy Awards")
plt.title("My Favorite Movies")
# label x-axis with movie names at bar centers
plt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)
plt.show()
def make_chart_histogram(plt):
grades = [83,95,91,87,70,0,85,82,100,67,73,77,0]
decile = lambda grade: grade // 10 * 10
histogram = Counter(decile(grade) for grade in grades)
plt.bar([x - 4 for x in histogram.keys()], # shift each bar to the left by 4
histogram.values(), # give each bar its correct height
8) # give each bar a width of 8
plt.axis([-5, 105, 0, 5]) # x-axis from -5 to 105,
# y-axis from 0 to 5
plt.xticks([10 * i for i in range(11)]) # x-axis labels at 0, 10, ..., 100
plt.xlabel("Decile")
plt.ylabel("# of Students")
plt.title("Distribution of Exam 1 Grades")
plt.show()
def make_chart_misleading_y_axis(plt, mislead=True):
mentions = [500, 505]
years = [2013, 2014]
plt.bar([2012.6, 2013.6], mentions, 0.8)
plt.xticks(years)
plt.ylabel("# of times I heard someone say 'data science'")
# if you don't do this, matplotlib will label the x-axis 0, 1
# and then add a +2.013e3 off in the corner (bad matplotlib!)
plt.ticklabel_format(useOffset=False)
if mislead:
# misleading y-axis only shows the part above 500
plt.axis([2012.5,2014.5,499,506])
plt.title("Look at the 'Huge' Increase!")
else:
plt.axis([2012.5,2014.5,0,550])
plt.title("Not So Huge Anymore.")
plt.show()
def make_chart_several_line_charts(plt):
variance = [1,2,4,8,16,32,64,128,256]
bias_squared = [256,128,64,32,16,8,4,2,1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = range(len(variance))
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance, 'g-', label='variance') # green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2') # red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error') # blue dotted line
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
def make_chart_scatter_plot(plt):
friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]
minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
plt.scatter(friends, minutes)
# label each point
for label, friend_count, minute_count in zip(labels, friends, minutes):
plt.annotate(label,
xy=(friend_count, minute_count), # put the label with its point
xytext=(5, -5), # but slightly offset
textcoords='offset points')
plt.title("Daily Minutes vs. Number of Friends")
plt.xlabel("# of friends")
plt.ylabel("daily minutes spent on the site")
plt.show()
def make_chart_scatterplot_axes(plt, equal_axes=False):
test_1_grades = [ 99, 90, 85, 97, 80]
test_2_grades = [100, 85, 60, 90, 70]
plt.scatter(test_1_grades, test_2_grades)
plt.xlabel("test 1 grade")
plt.ylabel("test 2 grade")
if equal_axes:
plt.title("Axes Are Comparable")
plt.axis("equal")
else:
plt.title("Axes Aren't Comparable")
plt.show()
def make_chart_pie_chart(plt):
plt.pie([0.95, 0.05], labels=["Uses pie charts", "Knows better"])
# make sure pie is a circle and not an oval
plt.axis("equal")
plt.show()
if __name__ == "__main__":
make_chart_simple_line_chart(plt)
make_chart_simple_bar_chart(plt)
make_chart_histogram(plt)
make_chart_misleading_y_axis(plt, mislead=True)
make_chart_misleading_y_axis(plt, mislead=False)
make_chart_several_line_charts(plt)
make_chart_scatterplot_axes(plt, equal_axes=False)
make_chart_scatterplot_axes(plt, equal_axes=True)
make_chart_pie_chart(plt)
| unlicense |
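# A minimal sketch isolating the bucketing logic used by make_chart_histogram above:
# grades are grouped into deciles with Counter before any plotting, so the grouping
# can be checked without a display backend. Purely illustrative.
from collections import Counter
grades = [83, 95, 91, 87, 70, 0, 85, 82, 100, 67, 73, 77, 0]
decile = lambda grade: grade // 10 * 10
histogram = Counter(decile(grade) for grade in grades)
# e.g. the two zeros land in the 0 bucket and 83/85/82/87 land in the 80 bucket
print(sorted(histogram.items()))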
mmottahedi/nilmtk | nilmtk/building.py | 6 | 1972 | from __future__ import print_function, division
from collections import namedtuple, OrderedDict
import pandas as pd
from .metergroup import MeterGroup
from .datastore.datastore import join_key
from .hashable import Hashable
BuildingID = namedtuple('BuildingID', ['instance', 'dataset'])
class Building(Hashable):
"""
Attributes
----------
elec : MeterGroup
metadata : dict
Metadata just about this building (e.g. geo location etc).
See http://nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#building
Has these additional keys:
dataset : string
"""
def __init__(self):
self.elec = MeterGroup()
self.metadata = {}
def import_metadata(self, store, key, dataset_name):
self.metadata = store.load_metadata(key)
        if 'dataset' not in self.metadata:
self.metadata['dataset'] = dataset_name
elec_meters = self.metadata.pop('elec_meters', {})
appliances = self.metadata.pop('appliances', [])
self.elec.import_metadata(store, elec_meters, appliances, self.identifier)
def save(self, destination, key):
destination.write_metadata(key, self.metadata)
self.elec.save(destination, join_key(key, 'elec'))
@property
def identifier(self):
md = self.metadata
return BuildingID(instance=md.get('instance'),
dataset=md.get('dataset'))
def describe(self, **kwargs):
"""Returns a Series describing this building."""
md = self.metadata
series = pd.Series(name=self.identifier.instance)
for key in ['instance', 'building_type',
'construction_year', 'energy_improvements', 'heating',
'ownership', 'n_occupants', 'description_of_occupants']:
series[key] = md.get(key)
series = pd.concat([series, self.elec.describe(**kwargs)])
return series
| apache-2.0 |
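# A standalone sketch of what Building.describe() assembles, using plain pandas so it
# runs without an NILMTK DataStore; the metadata keys and values here are invented for
# illustration and the elec part of the real method is omitted.
import pandas as pd
metadata = {'instance': 1, 'dataset': 'REDD', 'building_type': 'house', 'n_occupants': 3}
keys = ['instance', 'building_type', 'construction_year', 'heating', 'n_occupants']
series = pd.Series({key: metadata.get(key) for key in keys}, name=metadata.get('instance'))
print(series)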
ptonner/GPy | GPy/plotting/matplot_dep/models_plots.py | 8 | 18909 | # Copyright (c) 2012-2015, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import warnings  # used by plot_fit when posterior samples are requested for 2D inputs
import numpy as np
from . import Tango
from .base_plots import gpplot, x_frame1D, x_frame2D, gperrors
from ...models.gp_coregionalized_regression import GPCoregionalizedRegression
from ...models.sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression
from scipy import sparse
from ...core.parameterization.variational import VariationalPosterior
from matplotlib import pyplot as plt
def plot_data(model, which_data_rows='all',
which_data_ycols='all', visible_dims=None,
fignum=None, ax=None, data_symbol='kx',mew=1.5):
"""
Plot the training data
- For higher dimensions than two, use fixed_inputs to plot the data points with some of the inputs fixed.
Can plot only part of the data
using which_data_rows and which_data_ycols.
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice model.X, model.Y
    :param which_data_ycols: when the data has several columns (independent outputs), only plot these
    :type which_data_ycols: 'all' or a list of integers
:param visible_dims: an array specifying the input dimensions to plot (maximum two)
:type visible_dims: a numpy array
:param fignum: figure to plot on.
:type fignum: figure number
:param ax: axes to plot on.
:type ax: axes handle
"""
#deal with optional arguments
if which_data_rows == 'all':
which_data_rows = slice(None)
if which_data_ycols == 'all':
which_data_ycols = np.arange(model.output_dim)
if ax is None:
fig = plt.figure(num=fignum)
ax = fig.add_subplot(111)
#data
X = model.X
Y = model.Y
#work out what the inputs are for plotting (1D or 2D)
if visible_dims is None:
visible_dims = np.arange(model.input_dim)
assert visible_dims.size <= 2, "Visible inputs cannot be larger than two"
free_dims = visible_dims
plots = {}
#one dimensional plotting
if len(free_dims) == 1:
for d in which_data_ycols:
plots['dataplot'] = ax.plot(X[which_data_rows,free_dims], Y[which_data_rows, d], data_symbol, mew=mew)
#2D plotting
elif len(free_dims) == 2:
for d in which_data_ycols:
plots['dataplot'] = ax.scatter(X[which_data_rows, free_dims[0]], X[which_data_rows, free_dims[1]], 40,
Y[which_data_rows, d], cmap=plt.cm.jet, vmin=Y.min(), vmax=Y.max(), linewidth=0.)
else:
raise NotImplementedError("Cannot define a frame with more than two input dimensions")
return plots
def plot_fit(model, plot_limits=None, which_data_rows='all',
which_data_ycols='all', fixed_inputs=[],
levels=20, samples=0, fignum=None, ax=None, resolution=None,
plot_raw=False,
linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue'], Y_metadata=None, data_symbol='kx',
apply_link=False, samples_y=0, plot_uncertain_inputs=True, predict_kw=None, plot_training_data=True):
"""
Plot the posterior of the GP.
      - In one dimension, the function is plotted with a shaded region identifying two standard deviations.
      - In two dimensions, a contour-plot shows the mean predicted function
      - In higher dimensions, use fixed_inputs to plot the GP with some of the inputs fixed.
Can plot only part of the data and part of the posterior functions
    using which_data_rows and which_data_ycols.
    :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
:type plot_limits: np.array
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice model.X, model.Y
    :param which_data_ycols: when the data has several columns (independent outputs), only plot these
    :type which_data_ycols: 'all' or a list of integers
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input index i should be set to value v.
:type fixed_inputs: a list of tuples
    :param levels: for 2D plotting, the number of contour levels to use
:type levels: int
:param samples: the number of a posteriori samples to plot p(f*|y)
:type samples: int
:param fignum: figure to plot on.
:type fignum: figure number
:param ax: axes to plot on.
:type ax: axes handle
:param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
:type resolution: int
:param plot_raw: Whether to plot the raw function p(f|y)
:type plot_raw: boolean
:param linecol: color of line to plot.
:type linecol: hex or color
:param fillcol: color of fill
:type fillcol: hex or color
:param apply_link: apply the link function if plotting f (default false), as well as posterior samples if requested
:type apply_link: boolean
:param samples_y: the number of posteriori f samples to plot p(y*|y)
:type samples_y: int
:param plot_uncertain_inputs: plot the uncertainty of the inputs as error bars if they have uncertainty (BGPLVM etc.)
:type plot_uncertain_inputs: boolean
:param predict_kw: keyword args for _raw_predict and predict functions if required
:type predict_kw: dict
:param plot_training_data: whether or not to plot the training points
:type plot_training_data: boolean
"""
#deal with optional arguments
if which_data_rows == 'all':
which_data_rows = slice(None)
if which_data_ycols == 'all':
which_data_ycols = np.arange(model.output_dim)
#if len(which_data_ycols)==0:
#raise ValueError('No data selected for plotting')
if ax is None:
fig = plt.figure(num=fignum)
ax = fig.add_subplot(111)
if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
X = model.X.mean
X_variance = model.X.variance
else:
X = model.X
Y = model.Y
if sparse.issparse(Y): Y = Y.todense().view(np.ndarray)
if hasattr(model, 'Z'): Z = model.Z
if predict_kw is None:
predict_kw = {}
#work out what the inputs are for plotting (1D or 2D)
fixed_dims = np.array([i for i,v in fixed_inputs])
free_dims = np.setdiff1d(np.arange(model.input_dim),fixed_dims)
plots = {}
#one dimensional plotting
if len(free_dims) == 1:
#define the frame on which to plot
Xnew, xmin, xmax = x_frame1D(X[:,free_dims], plot_limits=plot_limits, resolution=resolution or 200)
Xgrid = np.empty((Xnew.shape[0],model.input_dim))
Xgrid[:,free_dims] = Xnew
for i,v in fixed_inputs:
Xgrid[:,i] = v
#make a prediction on the frame and plot it
if plot_raw:
m, v = model._raw_predict(Xgrid, **predict_kw)
if apply_link:
lower = model.likelihood.gp_link.transf(m - 2*np.sqrt(v))
upper = model.likelihood.gp_link.transf(m + 2*np.sqrt(v))
#Once transformed this is now the median of the function
m = model.likelihood.gp_link.transf(m)
else:
lower = m - 2*np.sqrt(v)
upper = m + 2*np.sqrt(v)
else:
if isinstance(model,GPCoregionalizedRegression) or isinstance(model,SparseGPCoregionalizedRegression):
extra_data = Xgrid[:,-1:].astype(np.int)
if Y_metadata is None:
Y_metadata = {'output_index': extra_data}
else:
Y_metadata['output_index'] = extra_data
m, v = model.predict(Xgrid, full_cov=False, Y_metadata=Y_metadata, **predict_kw)
fmu, fv = model._raw_predict(Xgrid, full_cov=False, **predict_kw)
lower, upper = model.likelihood.predictive_quantiles(fmu, fv, (2.5, 97.5), Y_metadata=Y_metadata)
for d in which_data_ycols:
plots['gpplot'] = gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], ax=ax, edgecol=linecol, fillcol=fillcol)
#if not plot_raw: plots['dataplot'] = ax.plot(X[which_data_rows,free_dims], Y[which_data_rows, d], data_symbol, mew=1.5)
if not plot_raw and plot_training_data:
plots['dataplot'] = plot_data(model=model, which_data_rows=which_data_rows,
visible_dims=free_dims, data_symbol=data_symbol, mew=1.5, ax=ax, fignum=fignum)
#optionally plot some samples
if samples: #NOTE not tested with fixed_inputs
Fsim = model.posterior_samples_f(Xgrid, samples)
if apply_link:
Fsim = model.likelihood.gp_link.transf(Fsim)
for fi in Fsim.T:
plots['posterior_samples'] = ax.plot(Xnew, fi[:,None], '#3300FF', linewidth=0.25)
#ax.plot(Xnew, fi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs.
if samples_y: #NOTE not tested with fixed_inputs
Ysim = model.posterior_samples(Xgrid, samples_y, Y_metadata=Y_metadata)
for yi in Ysim.T:
plots['posterior_samples_y'] = ax.scatter(Xnew, yi[:,None], s=5, c=Tango.colorsHex['darkBlue'], marker='o', alpha=0.5)
#ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs.
#add error bars for uncertain (if input uncertainty is being modelled)
if hasattr(model,"has_uncertain_inputs") and model.has_uncertain_inputs() and plot_uncertain_inputs:
if plot_raw:
#add error bars for uncertain (if input uncertainty is being modelled), for plot_f
#Hack to plot error bars on latent function, rather than on the data
vs = model.X.mean.values.copy()
for i,v in fixed_inputs:
vs[:,i] = v
m_X, _ = model._raw_predict(vs)
if apply_link:
m_X = model.likelihood.gp_link.transf(m_X)
plots['xerrorbar'] = ax.errorbar(X[which_data_rows, free_dims].flatten(), m_X[which_data_rows, which_data_ycols].flatten(),
xerr=2 * np.sqrt(X_variance[which_data_rows, free_dims].flatten()),
ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
else:
plots['xerrorbar'] = ax.errorbar(X[which_data_rows, free_dims].flatten(), Y[which_data_rows, which_data_ycols].flatten(),
xerr=2 * np.sqrt(X_variance[which_data_rows, free_dims].flatten()),
ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
#set the limits of the plot to some sensible values
ymin, ymax = min(np.append(Y[which_data_rows, which_data_ycols].flatten(), lower)), max(np.append(Y[which_data_rows, which_data_ycols].flatten(), upper))
ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
#add inducing inputs (if a sparse model is used)
if hasattr(model,"Z"):
#Zu = model.Z[:,free_dims] * model._Xscale[:,free_dims] + model._Xoffset[:,free_dims]
if isinstance(model,SparseGPCoregionalizedRegression):
Z = Z[Z[:,-1] == Y_metadata['output_index'],:]
Zu = Z[:,free_dims]
z_height = ax.get_ylim()[0]
plots['inducing_inputs'] = ax.plot(Zu, np.zeros_like(Zu) + z_height, 'r|', mew=1.5, markersize=12)
#2D plotting
elif len(free_dims) == 2:
#define the frame for plotting on
resolution = resolution or 50
Xnew, _, _, xmin, xmax = x_frame2D(X[:,free_dims], plot_limits, resolution)
Xgrid = np.empty((Xnew.shape[0],model.input_dim))
Xgrid[:,free_dims] = Xnew
for i,v in fixed_inputs:
Xgrid[:,i] = v
x, y = np.linspace(xmin[0], xmax[0], resolution), np.linspace(xmin[1], xmax[1], resolution)
#predict on the frame and plot
if plot_raw:
m, _ = model._raw_predict(Xgrid, **predict_kw)
else:
if isinstance(model,GPCoregionalizedRegression) or isinstance(model,SparseGPCoregionalizedRegression):
extra_data = Xgrid[:,-1:].astype(np.int)
if Y_metadata is None:
Y_metadata = {'output_index': extra_data}
else:
Y_metadata['output_index'] = extra_data
m, v = model.predict(Xgrid, full_cov=False, Y_metadata=Y_metadata, **predict_kw)
for d in which_data_ycols:
m_d = m[:,d].reshape(resolution, resolution).T
plots['contour'] = ax.contour(x, y, m_d, levels, vmin=m.min(), vmax=m.max(), cmap=plt.cm.jet)
#if not plot_raw: plots['dataplot'] = ax.scatter(X[which_data_rows, free_dims[0]], X[which_data_rows, free_dims[1]], 40, Y[which_data_rows, d], cmap=plt.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.)
if not plot_raw and plot_training_data:
plots['dataplot'] = ax.scatter(X[which_data_rows, free_dims[0]], X[which_data_rows, free_dims[1]], 40, Y[which_data_rows, d], cmap=plt.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.)
#set the limits of the plot to some sensible values
ax.set_xlim(xmin[0], xmax[0])
ax.set_ylim(xmin[1], xmax[1])
if samples:
warnings.warn("Samples are rather difficult to plot for 2D inputs...")
#add inducing inputs (if a sparse model is used)
if hasattr(model,"Z"):
#Zu = model.Z[:,free_dims] * model._Xscale[:,free_dims] + model._Xoffset[:,free_dims]
Zu = Z[:,free_dims]
plots['inducing_inputs'] = ax.plot(Zu[:,0], Zu[:,1], 'wo')
else:
raise NotImplementedError("Cannot define a frame with more than two input dimensions")
return plots
def plot_fit_f(model, *args, **kwargs):
"""
Plot the GP's view of the world, where the data is normalized and before applying a likelihood.
    All args and kwargs are passed on to models_plots.plot_fit.
"""
kwargs['plot_raw'] = True
plot_fit(model,*args, **kwargs)
def fixed_inputs(model, non_fixed_inputs, fix_routine='median', as_list=True, X_all=False):
"""
Convenience function for returning back fixed_inputs where the other inputs
are fixed using fix_routine
:param model: model
:type model: Model
:param non_fixed_inputs: dimensions of non fixed inputs
:type non_fixed_inputs: list
:param fix_routine: fixing routine to use, 'mean', 'median', 'zero'
:type fix_routine: string
:param as_list: if true, will return a list of tuples with (dimension, fixed_val) otherwise it will create the corresponding X matrix
:type as_list: boolean
"""
f_inputs = []
if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
X = model.X.mean.values.copy()
elif isinstance(model.X, VariationalPosterior):
X = model.X.values.copy()
else:
if X_all:
X = model.X_all.copy()
else:
X = model.X.copy()
for i in range(X.shape[1]):
if i not in non_fixed_inputs:
if fix_routine == 'mean':
f_inputs.append( (i, np.mean(X[:,i])) )
            elif fix_routine == 'median':
                f_inputs.append( (i, np.median(X[:,i])) )
            else: # set to zero
f_inputs.append( (i, 0) )
if not as_list:
X[:,i] = f_inputs[-1][1]
if as_list:
return f_inputs
else:
return X
def errorbars_trainset(model, which_data_rows='all',
which_data_ycols='all', fixed_inputs=[],
fignum=None, ax=None,
linecol='red', data_symbol='kx',
predict_kw=None, plot_training_data=True, **kwargs):
"""
Plot the posterior error bars corresponding to the training data
- For higher dimensions than two, use fixed_inputs to plot the data points with some of the inputs fixed.
Can plot only part of the data
using which_data_rows and which_data_ycols.
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice model.X, model.Y
    :param which_data_ycols: when the data has several columns (independent outputs), only plot these
    :type which_data_ycols: 'all' or a list of integers
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input index i should be set to value v.
:type fixed_inputs: a list of tuples
:param fignum: figure to plot on.
:type fignum: figure number
:param ax: axes to plot on.
:type ax: axes handle
:param plot_training_data: whether or not to plot the training points
:type plot_training_data: boolean
"""
#deal with optional arguments
if which_data_rows == 'all':
which_data_rows = slice(None)
if which_data_ycols == 'all':
which_data_ycols = np.arange(model.output_dim)
if ax is None:
fig = plt.figure(num=fignum)
ax = fig.add_subplot(111)
X = model.X
Y = model.Y
if predict_kw is None:
predict_kw = {}
#work out what the inputs are for plotting (1D or 2D)
fixed_dims = np.array([i for i,v in fixed_inputs])
free_dims = np.setdiff1d(np.arange(model.input_dim),fixed_dims)
plots = {}
#one dimensional plotting
if len(free_dims) == 1:
m, v = model.predict(X, full_cov=False, Y_metadata=model.Y_metadata, **predict_kw)
fmu, fv = model._raw_predict(X, full_cov=False, **predict_kw)
lower, upper = model.likelihood.predictive_quantiles(fmu, fv, (2.5, 97.5), Y_metadata=model.Y_metadata)
for d in which_data_ycols:
plots['gperrors'] = gperrors(X, m[:, d], lower[:, d], upper[:, d], edgecol=linecol, ax=ax, fignum=fignum, **kwargs )
if plot_training_data:
plots['dataplot'] = plot_data(model=model, which_data_rows=which_data_rows,
visible_dims=free_dims, data_symbol=data_symbol, mew=1.5, ax=ax, fignum=fignum)
#set the limits of the plot to some sensible values
ymin, ymax = min(np.append(Y[which_data_rows, which_data_ycols].flatten(), lower)), max(np.append(Y[which_data_rows, which_data_ycols].flatten(), upper))
ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
ax.set_xlim(X[:,free_dims].min(), X[:,free_dims].max())
ax.set_ylim(ymin, ymax)
elif len(free_dims) == 2:
raise NotImplementedError("Not implemented yet")
else:
raise NotImplementedError("Cannot define a frame with more than two input dimensions")
return plots
| bsd-3-clause |
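# A hedged usage sketch: plot_fit above is normally reached through a fitted model's
# plot() method rather than called directly. Assuming the standard GPy modelling API
# (GPy.kern.RBF, GPy.models.GPRegression), a 1D regression would be plotted roughly
# like this.
import numpy as np
import GPy
X = np.random.uniform(-3., 3., (20, 1))
Y = np.sin(X) + np.random.randn(20, 1) * 0.05
kernel = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
m = GPy.models.GPRegression(X, Y, kernel)
m.optimize(messages=False)
m.plot()  # expected to dispatch to the matplotlib plotting code in this module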
caseyclements/blaze | blaze/compute/pyfunc.py | 7 | 6410 | from __future__ import absolute_import, division, print_function
import pandas as pd
from ..expr import (Expr, Symbol, Field, Arithmetic, Math,
Date, Time, DateTime, Millisecond, Microsecond, broadcast,
sin, cos, Map, UTCFromTimestamp, DateTimeTruncate, symbol,
USub, Not, notnull)
from ..expr import math as expr_math
from ..expr.expressions import valid_identifier
from ..dispatch import dispatch
from . import pydatetime
import datetime
import math
import toolz
import itertools
funcnames = ('func_%d' % i for i in itertools.count())
def parenthesize(s):
if ' ' in s:
return '(%s)' % s
else:
return s
def print_python(leaves, expr):
""" Print expression to be evaluated in Python
>>> from blaze.expr import ceil, sin
>>> t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
>>> print_python([t], t.x + t.y)
('t[0] + t[1]', {})
Supports mathematical and datetime access
>>> print_python([t], sin(t.x) > ceil(t.y)) # doctest: +SKIP
('math.sin(t[0]) > math.ceil(t[1])', {'math':<module 'math'>})
>>> print_python([t], t.when.day + 1)
('t[3].day + 1', {})
Specify leaves of the expression to control level of printing
>>> print_python([t.x, t.y], t.x + t.y)
('x + y', {})
Returns
-------
s: string
       An evalable string
scope: dict
A namespace to add to be given to eval
"""
if isinstance(expr, Expr) and any(expr.isidentical(lf) for lf in leaves):
return valid_identifier(expr._name), {}
return _print_python(expr, leaves=leaves)
@dispatch(object)
def _print_python(expr, leaves=None):
return repr(expr), {}
@dispatch((datetime.datetime, datetime.date))
def _print_python(expr, leaves=None):
return repr(expr), {'datetime': datetime, 'Timestamp': pd.Timestamp}
@dispatch(Symbol)
def _print_python(expr, leaves=None):
return valid_identifier(expr._name), {}
@dispatch(Field)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
index = expr._child.fields.index(expr._name)
return '%s[%d]' % (parenthesize(child), index), scope
@dispatch(Arithmetic)
def _print_python(expr, leaves=None):
lhs, left_scope = print_python(leaves, expr.lhs)
rhs, right_scope = print_python(leaves, expr.rhs)
return ('%s %s %s' % (parenthesize(lhs),
expr.symbol,
parenthesize(rhs)),
toolz.merge(left_scope, right_scope))
@dispatch(USub)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return '%s%s' % (expr.symbol, parenthesize(child)), scope
@dispatch(Not)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return 'not %s' % parenthesize(child), scope
@dispatch(Math)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('math.%s(%s)' % (type(expr).__name__, child),
toolz.merge(scope, {'math': math}))
@dispatch(expr_math.abs)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('abs(%s)' % child, scope)
@dispatch(Date)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('%s.date()' % parenthesize(child), scope)
@dispatch(Time)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('%s.time()' % parenthesize(child), scope)
@dispatch(Millisecond)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('%s.microsecond // 1000' % parenthesize(child), scope)
@dispatch(UTCFromTimestamp)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('datetime.datetime.utcfromtimestamp(%s)' % parenthesize(child),
toolz.merge({'datetime': datetime}, scope))
@dispatch(DateTime)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
attr = type(expr).__name__.lower()
return ('%s.%s' % (parenthesize(child), attr), scope)
@dispatch(DateTimeTruncate)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
scope['truncate'] = pydatetime.truncate
return ('truncate(%s, %s, "%s")' % (child, expr.measure, expr.unit),
scope)
@dispatch(Map)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
funcname = next(funcnames)
return ('%s(%s)' % (funcname, child),
toolz.assoc(scope, funcname, expr.func))
@dispatch(notnull)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('notnull(%s)' % child,
toolz.merge(scope, dict(notnull=lambda x: x is not None)))
@dispatch(Expr)
def _print_python(expr, leaves=None):
raise NotImplementedError("Do not know how to write expressions of type %s"
" to Python code" % type(expr).__name__)
def funcstr(leaves, expr):
""" Lambda string for an expresion
>>> t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
>>> funcstr([t], t.x + t.y)
('lambda t: t[0] + t[1]', {})
>>> funcstr([t.x, t.y], t.x + t.y)
('lambda x, y: x + y', {})
Also returns scope for libraries like math or datetime
>>> funcstr([t.x, t.y], sin(t.x) + t.y) # doctest: +SKIP
('lambda x, y: math.sin(x) + y', {'math': <module 'math'>})
>>> from datetime import date
>>> funcstr([t.x, t.y, t.when], t.when.date > date(2001, 12, 25)) #doctest: +SKIP
('lambda x, y, when: when.day > datetime.date(2001, 12, 25)', {'datetime': <module 'datetime'>})
"""
result, scope = print_python(leaves, expr)
leaf_names = [print_python([leaf], leaf)[0] for leaf in leaves]
return 'lambda %s: %s' % (', '.join(leaf_names),
result), scope
def lambdify(leaves, expr):
""" Lambda for an expresion
>>> t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
>>> f = lambdify([t], t.x + t.y)
>>> f((1, 10, 100, ''))
11
>>> f = lambdify([t.x, t.y, t.z, t.when], t.x + cos(t.y))
>>> f(1, 0, 100, '')
2.0
"""
s, scope = funcstr(leaves, expr)
return eval(s, scope)
| bsd-3-clause |
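# A small usage sketch of the helpers above, mirroring their doctests. It assumes the
# import paths blaze.expr and blaze.compute.pyfunc match this source tree.
from blaze.expr import symbol, sin
from blaze.compute.pyfunc import funcstr, lambdify
t = symbol('t', '{x: float64, y: float64}')
src, scope = funcstr([t.x, t.y], sin(t.x) + t.y)
print(src)            # lambda x, y: math.sin(x) + y
print(sorted(scope))  # ['math'] -- the extra names handed to eval
f = lambdify([t.x, t.y], sin(t.x) + t.y)
print(f(0.0, 1.0))    # 1.0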
mutirri/bokeh | bokeh/charts/builder/tests/test_histogram_builder.py | 33 | 4257 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
from mock import patch
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pandas as pd
from bokeh.charts import Histogram
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestHistogram(unittest.TestCase):
def test_supported_input(self):
normal = [1, 2, 3, 1]
lognormal = [5, 4, 4, 1]
xyvalues = OrderedDict(normal=normal, lognormal=lognormal)
xyvaluesdf = pd.DataFrame(xyvalues)
exptected = dict(
leftnormal=[1., 1.4, 1.8, 2.2, 2.6],
rightnormal=[1.4, 1.8, 2.2, 2.6, 3.],
lognormal=[5, 4, 4, 1],
edgeslognormal=[1., 1.8, 2.6, 3.4, 4.2, 5.],
bottomlognormal=[0, 0, 0, 0, 0],
bottomnormal=[0, 0, 0, 0, 0],
edgesnormal=[1., 1.4, 1.8, 2.2, 2.6, 3.],
histlognormal=[0.3125, 0., 0., 0.625, 0.3125],
leftlognormal=[1., 1.8, 2.6, 3.4, 4.2],
normal=[1, 2, 3, 1],
rightlognormal=[1.8, 2.6, 3.4, 4.2, 5.],
histnormal=[1.25, 0., 0.625, 0., 0.625],
)
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Histogram, _xy, bins=5)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
for key, expected_v in exptected.items():
assert_array_almost_equal(builder._data[key], expected_v, decimal=2)
lvalues = [[1, 2, 3, 1], [5, 4, 4, 1]]
for i, _xy in enumerate([lvalues, np.array(lvalues)]):
hm = create_chart(Histogram, _xy, bins=5)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1'])
for key, expected_v in exptected.items():
# replace the keys because we have 0, 1 instead of normal and lognormal
key = key.replace('lognormal', '1').replace('normal', '0')
assert_array_almost_equal(builder._data[key], expected_v, decimal=2)
@patch('bokeh.charts.builder.histogram_builder.np.histogram', return_value=([1, 3, 4], [2.4, 4]))
def test_histogram_params(self, histogram_mock):
inputs = [[5, 0, 0.5, True], [3, 1, 0, False]]
normal = [1, 2, 3, 1]
lognormal = [5, 4, 4, 1]
xyvalues = OrderedDict()
xyvalues['normal'] = normal
xyvalues['lognormal'] = lognormal
for (bins, mu, sigma, dens) in inputs:
histogram_mock.reset_mock()
kws = dict(bins=bins, mu=mu, sigma=sigma, density=dens)
hm = create_chart(Histogram, xyvalues, compute_values=False, **kws)
builder = hm._builders[0]
# ensure all class attributes have been correctly set
for key, value in kws.items():
self.assertEqual(getattr(builder, key), value)
builder._process_data()
# ensure we are calling numpy.histogram with the right args
calls = histogram_mock.call_args_list
assert_array_equal(calls[0][0][0], np.array([1, 2, 3, 1]))
assert_array_equal(calls[1][0][0], np.array([5, 4, 4, 1]))
self.assertEqual(calls[0][1]['bins'], bins)
self.assertEqual(calls[1][1]['bins'], bins)
self.assertEqual(calls[0][1]['density'], dens)
self.assertEqual(calls[1][1]['density'], dens)
| bsd-3-clause |
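# A hedged sketch of the numpy call the Histogram builder wraps: the density values
# below reproduce the 'histnormal' fixture in the test above
# ([1.25, 0., 0.625, 0., 0.625] for data [1, 2, 3, 1] with 5 bins).
import numpy as np
hist, edges = np.histogram([1, 2, 3, 1], bins=5, density=True)
print(hist)   # [ 1.25   0.     0.625  0.     0.625]
print(edges)  # [ 1.   1.4  1.8  2.2  2.6  3. ]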
roxyboy/bokeh | examples/plotting/server/elements.py | 42 | 1532 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import pandas as pd
from bokeh.plotting import figure, show, output_server
from bokeh.sampledata import periodic_table
elements = periodic_table.elements
elements = elements[elements["atomic number"] <= 82]
elements = elements[~pd.isnull(elements["melting point"])]
mass = [float(x.strip("[]")) for x in elements["atomic mass"]]
elements["atomic mass"] = mass
palette = list(reversed([
"#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#f7f7f7","#d1e5f0","#92c5de","#4393c3","#2166ac","#053061"
]))
melting_points = elements["melting point"]
low = min(melting_points)
high= max(melting_points)
melting_point_inds = [int(10*(x-low)/(high-low)) for x in melting_points] #gives items in colors a value from 0-10
meltingpointcolors = [palette[i] for i in melting_point_inds]
output_server("elements")
TOOLS = "pan,wheel_zoom,box_zoom,reset,resize,save"
p = figure(tools=TOOLS, toolbar_location="left", plot_width=1200)
p.title = "Density vs Atomic Weight of Elements (colored by melting point)"
p.background_fill= "#cccccc"
p.circle(elements["atomic mass"], elements["density"], size=12,
color=meltingpointcolors, line_color="black", fill_alpha=0.8)
p.text(elements["atomic mass"], elements["density"]+0.3,
text=elements["symbol"], text_color="#333333",
text_align="center", text_font_size="10pt")
p.xaxis.axis_label="atomic weight (amu)"
p.yaxis.axis_label="density (g/cm^3)"
p.grid.grid_line_color="white"
show(p)
| bsd-3-clause |
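# A standalone sketch of the colour-mapping step used above: melting points are rescaled
# to an integer palette index in [0, 10]. Toy values stand in for the periodic_table
# sample data so this runs without bokeh.sampledata.
melting_points = [54.8, 933.47, 1811.0, 3695.0, 600.61]
low, high = min(melting_points), max(melting_points)
palette_index = [int(10 * (x - low) / (high - low)) for x in melting_points]
print(palette_index)  # [0, 2, 4, 10, 1]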