repo_name (string, 7-79 chars) | path (string, 4-179 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 959-798k chars) | license (string, 15 classes) |
---|---|---|---|---|---|
harinisuresh/yelp-district-clustering | LDAWithGensim.py | 1 | 2666 |
"""For Training the LDA model"""
import logging
import gensim
from gensim.corpora import BleiCorpus
from gensim import corpora
from DataImporter import get_vegas_reviews
import re
from sklearn.feature_extraction import text as sktext
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class Corpus(object):
def __init__(self, values, reviews_dictionary, corpus_path):
self.values = values
self.reviews_dictionary = reviews_dictionary
self.corpus_path = corpus_path
def __iter__(self):
for business_id, review in self.values.iteritems():
yield self.reviews_dictionary.doc2bow(review["words"])
def serialize(self):
BleiCorpus.serialize(self.corpus_path, self, id2word=self.reviews_dictionary)
return self
class Dictionary(object):
def __init__(self, values, dictionary_path):
self.values = values
self.dictionary_path = dictionary_path
def build(self):
dictionary = corpora.Dictionary(review["words"] for business_id, review in self.values.iteritems())
dictionary.filter_extremes(keep_n=10000)
dictionary.compactify()
corpora.Dictionary.save(dictionary, self.dictionary_path)
return dictionary
class Train():
def __init__(self):
pass
@staticmethod
def run(lda_model_path, corpus_path, num_topics, id2word):
corpus = corpora.BleiCorpus(corpus_path)
lda = gensim.models.LdaModel(corpus, num_topics=num_topics, id2word=id2word, passes=10, eval_every=10, iterations=500)
lda.save(lda_model_path)
return lda
def get_words_from_text(text, custom_stop_words=()):
    stoplist = set(sktext.ENGLISH_STOP_WORDS) | set(custom_stop_words)
words = [word for word in re.split('[\s,.()!&?/\*\^#@0-9":=\[\]$\\;%]|--', text.lower()) if word not in stoplist and word != ""]
return words
def main():
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
dictionary_path = "models/dictionary.dict"
corpus_path = "models/corpus.lda-c"
lda_num_topics = 50
lda_model_path = "models/lda_model.lda"
reviews = get_vegas_reviews()
corpus_collection = {business_id : {"review_text" : review_text, "words": get_words_from_text(review_text)} for business_id, review_text in reviews.iteritems()}
dictionary = Dictionary(corpus_collection, dictionary_path).build()
Corpus(corpus_collection, dictionary, corpus_path).serialize()
lda = Train.run(lda_model_path, corpus_path, lda_num_topics, dictionary)
print "Converged on Topics:"
lda.print_topics(50)
if __name__ == '__main__':
main()
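# Illustrative usage sketch (not from the original script): reloading the
# artifacts written by main() above with gensim's standard load methods.
# The sample review text is made up; topic output depends on the training data.
#
#   dictionary = corpora.Dictionary.load("models/dictionary.dict")
#   lda = gensim.models.LdaModel.load("models/lda_model.lda")
#   bow = dictionary.doc2bow(get_words_from_text("great tacos and friendly service"))
#   print lda[bow]  # list of (topic_id, probability) pairs for the new review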
| mit |
jjs0sbw/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/numpy/lib/recfunctions.py | 23 | 34483 |
"""
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for matplotlib.
They have been rewritten and extended for convenience.
"""
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
_check_fill_value = np.ma.core._check_fill_value
__all__ = ['append_fields',
'drop_fields',
'find_duplicates',
'get_fieldstructure',
'join_by',
'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields',
'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills the fields of `output` with the fields of `input`,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested structures
are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields as keys and a list of parent fields as values.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
# if (lastparent[-1] != lastname):
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if hasattr(element, '__iter__') and not isinstance(element, basestring):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False}, optional
Whether to collapse nested fields into a flat sequence of items.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in itertools.izip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).iteritems():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays,
fill_value= -1, flatten=False, usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something
depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = map(np.asanyarray, seqarrays)
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the fields
to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : string or sequence
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype (`asrecarray=False`)
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
#
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
#
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
#
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append((newname,
_recursive_rename_fields(current, namemapper)))
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value= -1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else :
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
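# Illustrative usage of append_fields (comment-only sketch, not from the original source):
#   >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
#   >>> append_fields(a, 'C', data=np.array([100, 200]), usemask=False)
# returns a new structured array with fields ('A', 'B', 'C'); with the default
# usemask=True the result is a masked array, padded with fill_value wherever
# the inputs differ in length.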
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
Superposes arrays field by field.
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" % \
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of strings corresponding
to the fields used to join the arrays.
An exception is raised if the `key` field cannot be found in the two input
arrays.
Neither `r1` nor `r2` should have any duplicates along `key`: the presence
of duplicates will make the output quite unreliable. Note that duplicates
are not looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of r1
not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1 not
in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present in r2
but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present in r1
but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for the
two arrays and concatenating the result. This array is then sorted, and
the common entries selected. The output is constructed by filling the fields
with the selected entries. Matching is not preserved if there are some
duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError("The 'jointype' argument should be in 'inner', "\
"'outer' or 'leftouter' (got '%s' instead)" % jointype)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
(nb1, nb2) = (len(r1), len(r2))
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = names.index(name)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.extend(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields : r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names:
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names:
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
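# Illustrative usage of join_by (comment-only sketch, not from the original source):
#   >>> a = np.array([(1, 10.), (2, 20.), (3, 30.)], dtype=[('key', int), ('x', float)])
#   >>> b = np.array([(2, 200.), (3, 300.), (4, 400.)], dtype=[('key', int), ('y', float)])
#   >>> join_by('key', a, b, jointype='inner', usemask=False)
# keeps the rows with keys 2 and 3 and carries the fields ('key', 'x', 'y');
# jointype='outer' would also keep keys 1 and 4, with the missing fields masked
# or filled from `defaults`.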
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| gpl-3.0 |
mne-tools/mne-tools.github.io | 0.18/_downloads/794e62cee9c736ff983cc5308c172250/plot_find_ecg_artifacts.py | 10 | 1296 |
"""
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
raw.del_proj()
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
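# A possible follow-up (not part of the original example): MNE also provides a
# higher-level helper that builds ECG epochs directly from the raw data, e.g.
#   mne.preprocessing.create_ecg_epochs(raw).average().plot()
# which visualises the averaged ECG artifact without the manual event/epoch steps above.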
| bsd-3-clause |
u-engine/rpg_svo | svo_analysis/src/svo_analysis/analyse_timing.py | 17 | 3476 |
#!/usr/bin/python
import os
import numpy as np
import matplotlib.pyplot as plt
def analyse_timing(D, trace_dir):
# identify measurements which result from normal frames and which from keyframes
is_frame = np.argwhere(D['repr_n_mps'] >= 0)
n_frames = len(is_frame)
# set initial time to zero
D['timestamp'] = D['timestamp'] - D['timestamp'][0]
# ----------------------------------------------------------------------------
# plot total time for frame processing
avg_time = np.mean(D['tot_time'][is_frame])*1000;
fig = plt.figure(figsize=(8, 3))
ax = fig.add_subplot(111, ylabel='processing time [ms]', xlabel='time [s]')
ax.plot(D['timestamp'][is_frame], D['tot_time'][is_frame]*1000, 'g-', label='total time [ms]')
ax.plot(D['timestamp'][is_frame], np.ones(n_frames)*avg_time, 'b--', label=str('%(time).1fms mean time' % {'time': avg_time}))
ax.legend()
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'timing.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
# plot boxplot
fig = plt.figure(figsize=(6,2))
ax = fig.add_subplot(111, xlabel='Processing time [ms]')
ax.boxplot([
D['tot_time'][is_frame]*1000,
# D['t_local_ba'][is_kf]*1000,
D['pose_optimizer'][is_frame]*1000 + D['point_optimizer'][is_frame]*1000,
D['reproject'][is_frame]*1000,
D['sparse_img_align'][is_frame]*1000,
D['pyramid_creation'][is_frame]*1000
], 0,'', vert=0)
boxplot_labels = [
r'\textbf{Total Motion Estimation: %2.2fms}' % np.median(D['tot_time'][is_frame]*1000),
# 'Local BA (KF only): %.2fms ' % np.median(D['local_ba'][is_kf]*1000),
'Refinement: %2.2fms' % np.median(D['pose_optimizer'][is_frame]*1000 + D['point_optimizer'][is_frame]*1000),
'Feature Alignment: %2.2fms' % np.median(D['reproject'][is_frame]*1000),
'Sparse Image Alignment: %2.2fms' % np.median(D['sparse_img_align'][is_frame]*1000),
'Pyramid Creation: %2.2fms' % np.median(D['pyramid_creation'][is_frame]*1000) ]
ax.set_yticks(np.arange(len(boxplot_labels))+1)
ax.set_yticklabels(boxplot_labels)
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'timing_boxplot.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
# plot boxplot reprojection
fig = plt.figure(figsize=(6,2))
ax = fig.add_subplot(111, xlabel='Processing time [ms]')
ax.boxplot([ D['reproject'][is_frame]*1000,
D['feature_align'][is_frame]*1000,
D['reproject_candidates'][is_frame]*1000,
D['reproject_kfs'][is_frame]*1000 ], 0, '', vert=0)
boxplot_labels = [r'\textbf{Total Reprojection: %2.2fms}' % np.median(D['reproject'][is_frame]*1000),
'Feature Alignment: %2.2fms' % np.median(D['feature_align'][is_frame]*1000),
'Reproject Candidates: %2.2fms' % np.median(D['reproject_candidates'][is_frame]*1000),
'Reproject Keyframes: %2.2fms' % np.median(D['reproject_kfs'][is_frame]*1000) ]
ax.set_yticks(np.arange(len(boxplot_labels))+1)
ax.set_yticklabels(boxplot_labels)
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'timing_reprojection.pdf'), bbox_inches="tight")
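# Illustrative driver (comment-only sketch; the trace path and file name are
# made up, and it assumes the SVO trace is a CSV whose header row contains the
# field names used above, e.g. 'timestamp', 'tot_time', 'repr_n_mps', ...):
#
#   if __name__ == '__main__':
#       trace_dir = '/tmp/svo_trace'
#       D = np.genfromtxt(os.path.join(trace_dir, 'trace.csv'),
#                         delimiter=',', names=True)
#       analyse_timing(D, trace_dir)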
| gpl-3.0 |
maaskola/GPy | GPy/models/mrd.py | 8 | 14617 |
# ## Copyright (c) 2013, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import itertools, logging
from ..kern import Kern
from ..core.parameterization.variational import NormalPosterior, NormalPrior
from ..core.parameterization import Param, Parameterized
from ..core.parameterization.observable_array import ObsAr
from ..inference.latent_function_inference.var_dtc import VarDTC
from ..inference.latent_function_inference import InferenceMethodList
from ..likelihoods import Gaussian
from ..util.initialization import initialize_latent
from ..core.sparse_gp import SparseGP, GP
from GPy.core.parameterization.variational import VariationalPosterior
from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch
from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
class MRD(BayesianGPLVMMiniBatch):
"""
!WARNING: This is bleeding edge code and still in development.
Functionality may change fundamentally during development!
Apply MRD to all given datasets Y in Ylist.
Y_i in [n x p_i]
If Ylist is a dictionary, the keys of the dictionary are the names, and the
values are the different datasets to compare.
The samples n in the datasets need
to match up, whereas the dimensionality p_d can differ.
:param [array-like] Ylist: List of datasets to apply MRD on
:param input_dim: latent dimensionality
:type input_dim: int
:param array-like X: mean of starting latent space q in [n x q]
:param array-like X_variance: variance of starting latent space q in [n x q]
:param initx: initialisation method for the latent space :
* 'concat' - PCA on concatenation of all datasets
* 'single' - Concatenation of PCA on datasets, respectively
* 'random' - Random draw from a Normal(0,1)
:type initx: ['concat'|'single'|'random']
:param initz: initialisation method for inducing inputs
:type initz: 'permute'|'random'
:param num_inducing: number of inducing inputs to use
:param Z: initial inducing inputs
:param kernel: list of kernels or kernel to copy for each output
:type kernel: [GPy.kernels.kernels] | GPy.kernels.kernels | None (default)
:param :class:`~GPy.inference.latent_function_inference inference_method:
InferenceMethodList of inferences, or one inference method for all
:param :class:`~GPy.likelihoodss.likelihoods.likelihoods` likelihoods: the likelihoods to use
:param str name: the name of this model
:param [str] Ynames: the names for the datasets given, must be of equal length as Ylist or None
:param bool|Norm normalizer: How to normalize the data?
:param bool stochastic: Should this model be using stochastic gradient descent over the dimensions?
:param bool|[bool] batchsize: either one batchsize for all, or one batchsize per dataset.
"""
def __init__(self, Ylist, input_dim, X=None, X_variance=None,
initx = 'PCA', initz = 'permute',
num_inducing=10, Z=None, kernel=None,
inference_method=None, likelihoods=None, name='mrd',
Ynames=None, normalizer=False, stochastic=False, batchsize=10):
self.logger = logging.getLogger(self.__class__.__name__)
self.input_dim = input_dim
self.num_inducing = num_inducing
if isinstance(Ylist, dict):
Ynames, Ylist = zip(*Ylist.items())
self.logger.debug("creating observable arrays")
self.Ylist = [ObsAr(Y) for Y in Ylist]
#The next line is a fix for Python 3. It replicates the python 2 behaviour from the above comprehension
Y = Ylist[-1]
if Ynames is None:
self.logger.debug("creating Ynames")
Ynames = ['Y{}'.format(i) for i in range(len(Ylist))]
self.names = Ynames
assert len(self.names) == len(self.Ylist), "one name per dataset, or None if Ylist is a dict"
if inference_method is None:
self.inference_method = InferenceMethodList([VarDTC() for _ in range(len(self.Ylist))])
else:
assert isinstance(inference_method, InferenceMethodList), "please provide one inference method per Y in the list and provide it as InferenceMethodList, inference_method given: {}".format(inference_method)
self.inference_method = inference_method
if X is None:
X, fracs = self._init_X(initx, Ylist)
else:
fracs = [X.var(0)]*len(Ylist)
Z = self._init_Z(initz, X)
self.Z = Param('inducing inputs', Z)
self.num_inducing = self.Z.shape[0] # ensure M==N if M>N
# sort out the kernels
self.logger.info("building kernels")
if kernel is None:
from ..kern import RBF
kernels = [RBF(input_dim, ARD=1, lengthscale=1./fracs[i]) for i in range(len(Ylist))]
elif isinstance(kernel, Kern):
kernels = []
for i in range(len(Ylist)):
k = kernel.copy()
kernels.append(k)
else:
assert len(kernel) == len(Ylist), "need one kernel per output"
assert all([isinstance(k, Kern) for k in kernel]), "invalid kernel object detected!"
kernels = kernel
self.variational_prior = NormalPrior()
#self.X = NormalPosterior(X, X_variance)
if likelihoods is None:
likelihoods = [Gaussian(name='Gaussian_noise_{}'.format(i)) for i in range(len(Ylist))]
else: likelihoods = likelihoods
self.logger.info("adding X and Z")
super(MRD, self).__init__(Y, input_dim, X=X, X_variance=X_variance, num_inducing=num_inducing,
Z=self.Z, kernel=None, inference_method=self.inference_method, likelihood=Gaussian(),
name='manifold relevance determination', normalizer=None,
missing_data=False, stochastic=False, batchsize=1)
self._log_marginal_likelihood = 0
self.unlink_parameter(self.likelihood)
self.unlink_parameter(self.kern)
del self.kern
del self.likelihood
self.num_data = Ylist[0].shape[0]
if isinstance(batchsize, int):
batchsize = itertools.repeat(batchsize)
self.bgplvms = []
for i, n, k, l, Y, im, bs in zip(itertools.count(), Ynames, kernels, likelihoods, Ylist, self.inference_method, batchsize):
assert Y.shape[0] == self.num_data, "All datasets need to share the number of datapoints, and those have to correspond to one another"
md = np.isnan(Y).any()
spgp = BayesianGPLVMMiniBatch(Y, input_dim, X, X_variance,
Z=Z, kernel=k, likelihood=l,
inference_method=im, name=n,
normalizer=normalizer,
missing_data=md,
stochastic=stochastic,
batchsize=bs)
spgp.kl_factr = 1./len(Ynames)
spgp.unlink_parameter(spgp.Z)
spgp.unlink_parameter(spgp.X)
del spgp.Z
del spgp.X
spgp.Z = self.Z
spgp.X = self.X
self.link_parameter(spgp, i+2)
self.bgplvms.append(spgp)
self.posterior = None
self.logger.info("init done")
def parameters_changed(self):
self._log_marginal_likelihood = 0
self.Z.gradient[:] = 0.
self.X.gradient[:] = 0.
for b, i in zip(self.bgplvms, self.inference_method):
self._log_marginal_likelihood += b._log_marginal_likelihood
self.logger.info('working on im <{}>'.format(hex(id(i))))
self.Z.gradient[:] += b.Z.gradient#full_values['Zgrad']
#grad_dict = b.full_values
if self.has_uncertain_inputs():
self.X.gradient += b._Xgrad
else:
self.X.gradient += b._Xgrad
#if self.has_uncertain_inputs():
# # update for the KL divergence
# self.variational_prior.update_gradients_KL(self.X)
# self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
# pass
def log_likelihood(self):
return self._log_marginal_likelihood
def _init_X(self, init='PCA', Ylist=None):
if Ylist is None:
Ylist = self.Ylist
if init in "PCA_concat":
X, fracs = initialize_latent('PCA', self.input_dim, np.hstack(Ylist))
fracs = [fracs]*len(Ylist)
elif init in "PCA_single":
X = np.zeros((Ylist[0].shape[0], self.input_dim))
fracs = []
for qs, Y in zip(np.array_split(np.arange(self.input_dim), len(Ylist)), Ylist):
x,frcs = initialize_latent('PCA', len(qs), Y)
X[:, qs] = x
fracs.append(frcs)
else: # init == 'random':
X = np.random.randn(Ylist[0].shape[0], self.input_dim)
fracs = X.var(0)
fracs = [fracs]*len(Ylist)
X -= X.mean()
X /= X.std()
return X, fracs
def _init_Z(self, init="permute", X=None):
if X is None:
X = self.X
if init in "permute":
Z = np.random.permutation(X.copy())[:self.num_inducing]
elif init in "random":
Z = np.random.randn(self.num_inducing, self.input_dim) * X.var()
return Z
def _handle_plotting(self, fignum, axes, plotf, sharex=False, sharey=False):
import matplotlib.pyplot as plt
if axes is None:
fig = plt.figure(num=fignum)
sharex_ax = None
sharey_ax = None
plots = []
for i, g in enumerate(self.bgplvms):
try:
if sharex:
sharex_ax = ax # @UndefinedVariable
sharex = False # dont set twice
if sharey:
sharey_ax = ax # @UndefinedVariable
sharey = False # dont set twice
except:
pass
if axes is None:
ax = fig.add_subplot(1, len(self.bgplvms), i + 1, sharex=sharex_ax, sharey=sharey_ax)
elif isinstance(axes, (tuple, list, np.ndarray)):
ax = axes[i]
else:
raise ValueError("Need one axes per latent dimension input_dim")
plots.append(plotf(i, g, ax))
if sharey_ax is not None:
plt.setp(ax.get_yticklabels(), visible=False)
plt.draw()
if axes is None:
try:
fig.tight_layout()
except:
pass
return plots
def predict(self, Xnew, full_cov=False, Y_metadata=None, kern=None, Yindex=0):
"""
Prediction for data set Yindex[default=0].
This predicts the output mean and variance for the dataset given in Ylist[Yindex]
"""
b = self.bgplvms[Yindex]
self.posterior = b.posterior
self.kern = b.kern
self.likelihood = b.likelihood
return super(MRD, self).predict(Xnew, full_cov, Y_metadata, kern)
#===============================================================================
# TODO: Predict! Maybe even change to several bgplvms, which share an X?
#===============================================================================
# def plot_predict(self, fignum=None, ax=None, sharex=False, sharey=False, **kwargs):
# fig = self._handle_plotting(fignum,
# ax,
# lambda i, g, ax: ax.imshow(g.predict(g.X)[0], **kwargs),
# sharex=sharex, sharey=sharey)
# return fig
def plot_scales(self, fignum=None, ax=None, titles=None, sharex=False, sharey=True, *args, **kwargs):
"""
TODO: Explain other parameters
:param titles: titles for axes of datasets
"""
if titles is None:
titles = [r'${}$'.format(name) for name in self.names]
ymax = reduce(max, [np.ceil(max(g.kern.input_sensitivity())) for g in self.bgplvms])
def plotf(i, g, ax):
#ax.set_ylim([0,ymax])
return g.kern.plot_ARD(ax=ax, title=titles[i], *args, **kwargs)
fig = self._handle_plotting(fignum, ax, plotf, sharex=sharex, sharey=sharey)
return fig
def plot_latent(self, labels=None, which_indices=None,
resolution=50, ax=None, marker='o', s=40,
fignum=None, plot_inducing=True, legend=True,
plot_limits=None,
aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
"""
see plotting.matplot_dep.dim_reduction_plots.plot_latent
if predict_kwargs is None, will plot latent spaces for 0th dataset (and kernel), otherwise give
predict_kwargs=dict(Yindex='index') for plotting only the latent space of dataset with 'index'.
"""
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from matplotlib import pyplot as plt
from ..plotting.matplot_dep import dim_reduction_plots
if "Yindex" not in predict_kwargs:
predict_kwargs['Yindex'] = 0
Yindex = predict_kwargs['Yindex']
if ax is None:
fig = plt.figure(num=fignum)
ax = fig.add_subplot(111)
else:
fig = ax.figure
self.kern = self.bgplvms[Yindex].kern
self.likelihood = self.bgplvms[Yindex].likelihood
plot = dim_reduction_plots.plot_latent(self, labels, which_indices,
resolution, ax, marker, s,
fignum, plot_inducing, legend,
plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
ax.set_title(self.bgplvms[Yindex].name)
try:
fig.tight_layout()
except:
pass
return plot
def __getstate__(self):
state = super(MRD, self).__getstate__()
if 'kern' in state:
del state['kern']
if 'likelihood' in state:
del state['likelihood']
return state
def __setstate__(self, state):
# TODO:
super(MRD, self).__setstate__(state)
self.kern = self.bgplvms[0].kern
self.likelihood = self.bgplvms[0].likelihood
self.parameters_changed()
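# Illustrative construction sketch (comment-only, not part of GPy; shapes and
# view names below are made up):
#
#   Y1 = np.random.randn(100, 5)   # view 1: 100 shared samples, 5 dimensions
#   Y2 = np.random.randn(100, 8)   # view 2: same samples, 8 dimensions
#   m = MRD([Y1, Y2], input_dim=3, num_inducing=10, Ynames=['view1', 'view2'])
#   m.optimize(messages=True)
#   m.plot_scales()                # per-view ARD scales of the shared latent space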
| bsd-3-clause |
ahoyosid/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 57 | 13752 |
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((rows.shape[0], X.shape[1]), dtype=np.float64)
assign_rows_csr(X, rows,
np.arange(out.shape[0], dtype=np.intp)[::-1], out)
assert_array_equal(out, X[rows].toarray()[::-1])
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
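    # With per-row sample weights, every nonzero entry contributes its row's
    # weight instead of 1, so the expected counts below are the axis sums of
    # this weighted nonzero mask.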
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
|
bsd-3-clause
|
heprom/pymicro
|
examples/3d_visualisation/steel_damage_3d.py
|
1
|
2034
|
import os, vtk, numpy as np
from vtk.util import numpy_support
from vtk.util.colors import *
from pymicro.file.file_utils import HST_read, HST_write, HST_info
from pymicro.view.vtk_utils import *
if __name__ == '__main__':
'''
Create a 3d scene showing a damaged tension steel sample.
The sample outline is made semi-transparent and cavities are shown
    in blue. The axes are labeled (L,T,S) according to the material
directions.
'''
print('reading volume...')
data_dir = '../data'
scan_name = 'steel_bin_431x431x246_uint8'
scan_path = os.path.join(data_dir, scan_name)
infos = HST_info(scan_path + '.raw.info')
volsize = np.array([infos['x_dim'], infos['y_dim'], infos['z_dim']])
print(volsize)
grid = read_image_data(scan_path + '.raw', volsize, header_size=0, data_type='uint8')
print('setting actors...')
damage = contourFilter(grid, 255, opacity=1.0, discrete=True, color=blue, diffuseColor=blue)
skin = contourFilter(grid, 155, opacity=0.05, discrete=True)
outline = data_outline(grid)
# Create renderer
ren = vtk.vtkRenderer()
ren.SetBackground(1.0, 1.0, 1.0)
ren.AddActor(outline)
ren.AddActor(skin)
ren.AddActor(damage)
print('setting up LTS axes')
axes = axes_actor(length=100)
axes.GetXAxisCaptionActor2D().GetCaptionTextProperty().SetColor(grey)
    ax1 = 'L'
    ax2 = 'S'
    ax3 = 'T'
axes.SetXAxisLabelText(ax1)
axes.SetYAxisLabelText(ax2)
axes.SetZAxisLabelText(ax3)
    ren.AddViewProp(axes)
print('generating views...')
cam = setup_camera(size=(volsize))
ren.SetActiveCamera(cam)
ren.AddViewProp(axes)
cam.SetFocalPoint(0.5 * volsize)
cam.SetPosition(500 + volsize)
cam.SetViewUp(0, 0, 1)
image_name = os.path.splitext(__file__)[0] + '.png'
    print('writing %s' % image_name)
render(ren, ren_size=(800, 800), save=True, display=False, name=image_name)
from matplotlib import image
image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
|
mit
|
rabernat/xray
|
xarray/core/dataset.py
|
1
|
129317
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from collections import Mapping, defaultdict
from distutils.version import LooseVersion
from numbers import Number
import warnings
import sys
import numpy as np
import pandas as pd
from . import ops
from . import utils
from . import groupby
from . import resample
from . import rolling
from . import indexing
from . import alignment
from . import formatting
from . import duck_array_ops
from .. import conventions
from .alignment import align
from .coordinates import DatasetCoordinates, LevelCoordinatesSource, Indexes
from .common import ImplementsDatasetReduce, BaseDataObject
from .dtypes import is_datetime_like
from .merge import (dataset_update_method, dataset_merge_method,
merge_data_and_coords, merge_variables)
from .utils import (Frozen, SortedKeysDict, maybe_wrap_array, hashable,
decode_numpy_dict_values, ensure_us_time_resolution)
from .variable import (Variable, as_variable, IndexVariable,
broadcast_variables)
from .pycompat import (iteritems, basestring, OrderedDict,
integer_types, dask_array_type, range)
from .options import OPTIONS
import xarray as xr
# list of attributes of pd.DatetimeIndex that are ndarrays of time info
_DATETIMEINDEX_COMPONENTS = ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond', 'date',
'time', 'dayofyear', 'weekofyear', 'dayofweek',
'quarter']
def _get_virtual_variable(variables, key, level_vars=None, dim_sizes=None):
"""Get a virtual variable (e.g., 'time.year' or a MultiIndex level)
from a dict of xarray.Variable objects (if possible)
"""
if level_vars is None:
level_vars = {}
if dim_sizes is None:
dim_sizes = {}
if key in dim_sizes:
data = pd.Index(range(dim_sizes[key]), name=key)
variable = IndexVariable((key,), data)
return key, key, variable
if not isinstance(key, basestring):
raise KeyError(key)
split_key = key.split('.', 1)
if len(split_key) == 2:
ref_name, var_name = split_key
elif len(split_key) == 1:
ref_name, var_name = key, None
else:
raise KeyError(key)
if ref_name in level_vars:
dim_var = variables[level_vars[ref_name]]
ref_var = dim_var.to_index_variable().get_level_variable(ref_name)
else:
ref_var = variables[ref_name]
if var_name is None:
virtual_var = ref_var
var_name = key
else:
if is_datetime_like(ref_var.dtype):
ref_var = xr.DataArray(ref_var)
data = getattr(ref_var.dt, var_name).data
else:
data = getattr(ref_var, var_name).data
virtual_var = Variable(ref_var.dims, data)
return ref_name, var_name, virtual_var
def calculate_dimensions(variables):
"""Calculate the dimensions corresponding to a set of variables.
Returns dictionary mapping from dimension names to sizes. Raises ValueError
if any of the dimension sizes conflict.
"""
dims = OrderedDict()
last_used = {}
scalar_vars = set(k for k, v in iteritems(variables) if not v.dims)
for k, var in iteritems(variables):
for dim, size in zip(var.dims, var.shape):
if dim in scalar_vars:
raise ValueError('dimension %r already exists as a scalar '
'variable' % dim)
if dim not in dims:
dims[dim] = size
last_used[dim] = k
elif dims[dim] != size:
raise ValueError('conflicting sizes for dimension %r: '
'length %s on %r and length %s on %r' %
(dim, size, k, dims[dim], last_used[dim]))
return dims
def merge_indexes(
indexes, # type: Dict[Any, Union[Any, List[Any]]]
variables, # type: Dict[Any, Variable]
coord_names, # type: Set
append=False, # type: bool
):
# type: (...) -> Tuple[OrderedDict[Any, Variable], Set]
"""Merge variables into multi-indexes.
Not public API. Used in Dataset and DataArray set_index
methods.
"""
vars_to_replace = {}
vars_to_remove = []
for dim, var_names in indexes.items():
if isinstance(var_names, basestring):
var_names = [var_names]
names, labels, levels = [], [], []
current_index_variable = variables.get(dim)
if current_index_variable is not None and append:
current_index = current_index_variable.to_index()
if isinstance(current_index, pd.MultiIndex):
names.extend(current_index.names)
labels.extend(current_index.labels)
levels.extend(current_index.levels)
else:
names.append('%s_level_0' % dim)
cat = pd.Categorical(current_index.values, ordered=True)
labels.append(cat.codes)
levels.append(cat.categories)
for n in var_names:
names.append(n)
var = variables[n]
if ((current_index_variable is not None) and
(var.dims != current_index_variable.dims)):
raise ValueError(
"dimension mismatch between %r %s and %r %s"
% (dim, current_index_variable.dims, n, var.dims))
else:
cat = pd.Categorical(var.values, ordered=True)
labels.append(cat.codes)
levels.append(cat.categories)
idx = pd.MultiIndex(labels=labels, levels=levels, names=names)
vars_to_replace[dim] = IndexVariable(dim, idx)
vars_to_remove.extend(var_names)
new_variables = OrderedDict([(k, v) for k, v in iteritems(variables)
if k not in vars_to_remove])
new_variables.update(vars_to_replace)
new_coord_names = coord_names | set(vars_to_replace)
new_coord_names -= set(vars_to_remove)
return new_variables, new_coord_names
def split_indexes(
dims_or_levels, # type: Union[Any, List[Any]]
variables, # type: Dict[Any, Variable]
coord_names, # type: Set
level_coords, # type: Dict[Any, Any]
drop=False, # type: bool
):
# type: (...) -> Tuple[OrderedDict[Any, Variable], Set]
"""Extract (multi-)indexes (levels) as variables.
Not public API. Used in Dataset and DataArray reset_index
methods.
"""
if isinstance(dims_or_levels, basestring):
dims_or_levels = [dims_or_levels]
dim_levels = defaultdict(list)
dims = []
for k in dims_or_levels:
if k in level_coords:
dim_levels[level_coords[k]].append(k)
else:
dims.append(k)
vars_to_replace = {}
vars_to_create = OrderedDict()
vars_to_remove = []
for d in dims:
index = variables[d].to_index()
if isinstance(index, pd.MultiIndex):
dim_levels[d] = index.names
else:
vars_to_remove.append(d)
if not drop:
vars_to_create[d + '_'] = Variable(d, index)
for d, levs in dim_levels.items():
index = variables[d].to_index()
if len(levs) == index.nlevels:
vars_to_remove.append(d)
else:
vars_to_replace[d] = IndexVariable(d, index.droplevel(levs))
if not drop:
for lev in levs:
idx = index.get_level_values(lev)
vars_to_create[idx.name] = Variable(d, idx)
new_variables = variables.copy()
for v in set(vars_to_remove):
del new_variables[v]
new_variables.update(vars_to_replace)
new_variables.update(vars_to_create)
new_coord_names = (coord_names | set(vars_to_create)) - set(vars_to_remove)
return new_variables, new_coord_names
def _assert_empty(args, msg='%s'):
if args:
raise ValueError(msg % args)
def as_dataset(obj):
"""Cast the given object to a Dataset.
Handles Datasets, DataArrays and dictionaries of variables. A new Dataset
object is only created if the provided object is not already one.
"""
if hasattr(obj, 'to_dataset'):
obj = obj.to_dataset()
if not isinstance(obj, Dataset):
obj = Dataset(obj)
return obj
class DataVariables(Mapping, formatting.ReprMixin):
def __init__(self, dataset):
self._dataset = dataset
def __iter__(self):
return (key for key in self._dataset._variables
if key not in self._dataset._coord_names)
def __len__(self):
return len(self._dataset._variables) - len(self._dataset._coord_names)
def __contains__(self, key):
return (key in self._dataset._variables and
key not in self._dataset._coord_names)
def __getitem__(self, key):
if key not in self._dataset._coord_names:
return self._dataset[key]
else:
raise KeyError(key)
def __unicode__(self):
return formatting.data_vars_repr(self)
@property
def variables(self):
all_variables = self._dataset.variables
return Frozen(OrderedDict((k, all_variables[k]) for k in self))
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return [key for key in self._dataset._ipython_key_completions_()
if key not in self._dataset._coord_names]
class _LocIndexer(object):
def __init__(self, dataset):
self.dataset = dataset
def __getitem__(self, key):
if not utils.is_dict_like(key):
raise TypeError('can only lookup dictionaries from Dataset.loc')
return self.dataset.sel(**key)
class Dataset(Mapping, ImplementsDatasetReduce, BaseDataObject,
formatting.ReprMixin):
"""A multi-dimensional, in memory, array database.
A dataset resembles an in-memory representation of a NetCDF file, and
consists of variables, coordinates and attributes which together form a
    self-describing dataset.
Dataset implements the mapping interface with keys given by variable names
and values given by DataArray objects for each variable name.
    One-dimensional variables with a name equal to their dimension are index
coordinates used for label based indexing.
"""
_groupby_cls = groupby.DatasetGroupBy
_rolling_cls = rolling.DatasetRolling
_resample_cls = resample.DatasetResample
def __init__(self, data_vars=None, coords=None, attrs=None,
compat='broadcast_equals'):
"""To load data from a file or file-like object, use the `open_dataset`
function.
Parameters
----------
data_vars : dict-like, optional
A mapping from variable names to :py:class:`~xarray.DataArray`
objects, :py:class:`~xarray.Variable` objects or tuples of the
form ``(dims, data[, attrs])`` which can be used as arguments to
create a new ``Variable``. Each dimension must have the same length
in all variables in which it appears.
coords : dict-like, optional
Another mapping in the same form as the `variables` argument,
            except that each item is saved on the dataset as a "coordinate".
These variables have an associated meaning: they describe
constant/fixed/independent quantities, unlike the
varying/measured/dependent quantities that belong in `variables`.
Coordinates values may be given by 1-dimensional arrays or scalars,
in which case `dims` do not need to be supplied: 1D arrays will be
assumed to give index values along the dimension with the same
name.
attrs : dict-like, optional
Global attributes to save on this dataset.
compat : {'broadcast_equals', 'equals', 'identical'}, optional
String indicating how to compare variables of the same name for
potential conflicts when initializing this dataset:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
"""
self._variables = OrderedDict()
self._coord_names = set()
self._dims = {}
self._attrs = None
self._file_obj = None
if data_vars is None:
data_vars = {}
if coords is None:
coords = {}
if data_vars is not None or coords is not None:
self._set_init_vars_and_dims(data_vars, coords, compat)
if attrs is not None:
self.attrs = attrs
self._encoding = None
self._initialized = True
def _set_init_vars_and_dims(self, data_vars, coords, compat):
"""Set the initial value of Dataset variables and dimensions
"""
both_data_and_coords = [k for k in data_vars if k in coords]
if both_data_and_coords:
raise ValueError('variables %r are found in both data_vars and '
'coords' % both_data_and_coords)
if isinstance(coords, Dataset):
coords = coords.variables
variables, coord_names, dims = merge_data_and_coords(
data_vars, coords, compat=compat)
self._variables = variables
self._coord_names = coord_names
self._dims = dims
@classmethod
def load_store(cls, store, decoder=None):
"""Create a new dataset from the contents of a backends.*DataStore
object
"""
variables, attributes = store.load()
if decoder:
variables, attributes = decoder(variables, attributes)
obj = cls(variables, attrs=attributes)
obj._file_obj = store
return obj
@property
def variables(self):
"""Frozen dictionary of xarray.Variable objects constituting this
dataset's data
"""
return Frozen(self._variables)
def _attrs_copy(self):
return None if self._attrs is None else OrderedDict(self._attrs)
@property
def attrs(self):
"""Dictionary of global attributes on this dataset
"""
if self._attrs is None:
self._attrs = OrderedDict()
return self._attrs
@attrs.setter
def attrs(self, value):
self._attrs = OrderedDict(value)
@property
def encoding(self):
"""Dictionary of global encoding attributes on this dataset
"""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value):
self._encoding = dict(value)
@property
def dims(self):
"""Mapping from dimension names to lengths.
Cannot be modified directly, but is updated when adding new variables.
        Note that the type of this object differs from `DataArray.dims`.
See `Dataset.sizes` and `DataArray.sizes` for consistently named
properties.
"""
return Frozen(SortedKeysDict(self._dims))
@property
def sizes(self):
"""Mapping from dimension names to lengths.
Cannot be modified directly, but is updated when adding new variables.
This is an alias for `Dataset.dims` provided for the benefit of
consistency with `DataArray.sizes`.
See also
--------
DataArray.sizes
"""
return self.dims
def load(self, **kwargs):
"""Manually trigger loading of this dataset's data from disk or a
remote source into memory and return this dataset.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
# access .data to coerce everything to numpy or dask arrays
lazy_data = {k: v._data for k, v in self.variables.items()
if isinstance(v._data, dask_array_type)}
if lazy_data:
import dask.array as da
# evaluate all the dask arrays simultaneously
evaluated_data = da.compute(*lazy_data.values(), **kwargs)
for k, data in zip(lazy_data, evaluated_data):
self.variables[k].data = data
# load everything else sequentially
for k, v in self.variables.items():
if k not in lazy_data:
v.load()
return self
def __dask_graph__(self):
graphs = {k: v.__dask_graph__() for k, v in self.variables.items()}
graphs = {k: v for k, v in graphs.items() if v is not None}
if not graphs:
return None
else:
from dask import sharedict
return sharedict.merge(*graphs.values())
def __dask_keys__(self):
import dask
return [v.__dask_keys__() for v in self.variables.values()
if dask.is_dask_collection(v)]
@property
def __dask_optimize__(self):
import dask.array as da
return da.Array.__dask_optimize__
@property
def __dask_scheduler__(self):
import dask.array as da
return da.Array.__dask_scheduler__
def __dask_postcompute__(self):
import dask
info = [(True, k, v.__dask_postcompute__())
if dask.is_dask_collection(v) else
(False, k, v) for k, v in self._variables.items()]
return self._dask_postcompute, (info, self._coord_names, self._dims,
self._attrs, self._file_obj,
self._encoding)
def __dask_postpersist__(self):
import dask
info = [(True, k, v.__dask_postpersist__())
if dask.is_dask_collection(v) else
(False, k, v) for k, v in self._variables.items()]
return self._dask_postpersist, (info, self._coord_names, self._dims,
self._attrs, self._file_obj,
self._encoding)
@staticmethod
def _dask_postcompute(results, info, *args):
variables = OrderedDict()
results2 = list(results[::-1])
for is_dask, k, v in info:
if is_dask:
func, args2 = v
r = results2.pop()
result = func(r, *args2)
else:
result = v
variables[k] = result
final = Dataset._construct_direct(variables, *args)
return final
@staticmethod
def _dask_postpersist(dsk, info, *args):
variables = OrderedDict()
for is_dask, k, v in info:
if is_dask:
func, args2 = v
result = func(dsk, *args2)
else:
result = v
variables[k] = result
return Dataset._construct_direct(variables, *args)
def compute(self, **kwargs):
"""Manually trigger loading of this dataset's data from disk or a
remote source into memory and return a new dataset. The original is
left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
new = self.copy(deep=False)
return new.load(**kwargs)
def _persist_inplace(self, **kwargs):
""" Persist all Dask arrays in memory """
# access .data to coerce everything to numpy or dask arrays
lazy_data = {k: v._data for k, v in self.variables.items()
if isinstance(v._data, dask_array_type)}
if lazy_data:
import dask
# evaluate all the dask arrays simultaneously
evaluated_data = dask.persist(*lazy_data.values(), **kwargs)
for k, data in zip(lazy_data, evaluated_data):
self.variables[k].data = data
return self
def persist(self, **kwargs):
""" Trigger computation, keeping data as dask arrays
This operation can be used to trigger computation on underlying dask
arrays, similar to ``.compute()``. However this operation keeps the
data as dask arrays. This is particularly useful when using the
dask.distributed scheduler and you want to load a large amount of data
into distributed memory.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.persist``.
See Also
--------
dask.persist
"""
new = self.copy(deep=False)
return new._persist_inplace(**kwargs)
@classmethod
def _construct_direct(cls, variables, coord_names, dims=None, attrs=None,
file_obj=None, encoding=None):
"""Shortcut around __init__ for internal use when we want to skip
costly validation
"""
obj = object.__new__(cls)
obj._variables = variables
obj._coord_names = coord_names
obj._dims = dims
obj._attrs = attrs
obj._file_obj = file_obj
obj._encoding = encoding
obj._initialized = True
return obj
__default_attrs = object()
@classmethod
def _from_vars_and_coord_names(cls, variables, coord_names, attrs=None):
dims = dict(calculate_dimensions(variables))
return cls._construct_direct(variables, coord_names, dims, attrs)
def _replace_vars_and_dims(self, variables, coord_names=None, dims=None,
attrs=__default_attrs, inplace=False):
"""Fastpath constructor for internal use.
Preserves coord names and attributes. If not provided explicitly,
dimensions are recalculated from the supplied variables.
The arguments are *not* copied when placed on the new dataset. It is up
to the caller to ensure that they have the right type and are not used
elsewhere.
Parameters
----------
variables : OrderedDict
coord_names : set or None, optional
attrs : OrderedDict or None, optional
Returns
-------
new : Dataset
"""
if dims is None:
dims = calculate_dimensions(variables)
if inplace:
self._dims = dims
self._variables = variables
if coord_names is not None:
self._coord_names = coord_names
if attrs is not self.__default_attrs:
self._attrs = attrs
obj = self
else:
if coord_names is None:
coord_names = self._coord_names.copy()
if attrs is self.__default_attrs:
attrs = self._attrs_copy()
obj = self._construct_direct(variables, coord_names, dims, attrs)
return obj
def _replace_indexes(self, indexes):
if not len(indexes):
return self
variables = self._variables.copy()
for name, idx in indexes.items():
variables[name] = IndexVariable(name, idx)
obj = self._replace_vars_and_dims(variables)
# switch from dimension to level names, if necessary
dim_names = {}
for dim, idx in indexes.items():
if not isinstance(idx, pd.MultiIndex) and idx.name != dim:
dim_names[dim] = idx.name
if dim_names:
obj = obj.rename(dim_names)
return obj
def copy(self, deep=False):
"""Returns a copy of this dataset.
If `deep=True`, a deep copy is made of each of the component variables.
        Otherwise, a shallow copy of each of the component variables is made, so
that the underlying memory region of the new dataset is the same as in
the original dataset.
"""
variables = OrderedDict((k, v.copy(deep=deep))
for k, v in iteritems(self._variables))
# skip __init__ to avoid costly validation
return self._construct_direct(variables, self._coord_names.copy(),
self._dims.copy(), self._attrs_copy(),
encoding=self.encoding)
def _subset_with_all_valid_coords(self, variables, coord_names, attrs):
needed_dims = set()
for v in variables.values():
needed_dims.update(v.dims)
for k in self._coord_names:
if set(self.variables[k].dims) <= needed_dims:
variables[k] = self._variables[k]
coord_names.add(k)
dims = dict((k, self._dims[k]) for k in needed_dims)
return self._construct_direct(variables, coord_names, dims, attrs)
@property
def _level_coords(self):
"""Return a mapping of all MultiIndex levels and their corresponding
coordinate name.
"""
level_coords = OrderedDict()
for cname in self._coord_names:
var = self.variables[cname]
if var.ndim == 1 and isinstance(var, IndexVariable):
level_names = var.level_names
if level_names is not None:
dim, = var.dims
level_coords.update({lname: dim for lname in level_names})
return level_coords
def _copy_listed(self, names):
"""Create a new Dataset with the listed variables from this dataset and
        all relevant coordinates. Skips all validation.
"""
variables = OrderedDict()
coord_names = set()
for name in names:
try:
variables[name] = self._variables[name]
except KeyError:
ref_name, var_name, var = _get_virtual_variable(
self._variables, name, self._level_coords, self.dims)
variables[var_name] = var
if ref_name in self._coord_names or ref_name in self.dims:
coord_names.add(var_name)
return self._subset_with_all_valid_coords(variables, coord_names,
attrs=self.attrs.copy())
def _construct_dataarray(self, name):
"""Construct a DataArray by indexing this dataset
"""
from .dataarray import DataArray
try:
variable = self._variables[name]
except KeyError:
_, name, variable = _get_virtual_variable(
self._variables, name, self._level_coords, self.dims)
coords = OrderedDict()
needed_dims = set(variable.dims)
for k in self.coords:
if set(self.variables[k].dims) <= needed_dims:
coords[k] = self.variables[k]
return DataArray(variable, coords, name=name, fastpath=True)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
@property
def _attr_sources(self):
"""List of places to look-up items for attribute-style access"""
return self._item_sources + [self.attrs]
@property
def _item_sources(self):
"""List of places to look-up items for key-completion"""
return [self.data_vars, self.coords, {d: self[d] for d in self.dims},
LevelCoordinatesSource(self)]
def __dir__(self):
# In order to suppress a deprecation warning in Ipython autocompletion
# .T is explicitly removed from __dir__. GH: issue 1675
d = super(Dataset, self).__dir__()
d.remove('T')
return d
def __contains__(self, key):
"""The 'in' operator will return true or false depending on whether
'key' is an array in the dataset or not.
"""
return key in self._variables
def __len__(self):
warnings.warn('calling len() on an xarray.Dataset will change in '
'xarray v0.11 to only include data variables, not '
'coordinates. Call len() on the Dataset.variables '
'property instead, like ``len(ds.variables)``, to '
'preserve existing behavior in a forwards compatible '
'manner.',
FutureWarning, stacklevel=2)
return len(self._variables)
def __bool__(self):
warnings.warn('casting an xarray.Dataset to a boolean will change in '
'xarray v0.11 to only include data variables, not '
'coordinates. Cast the Dataset.variables property '
'instead to preserve existing behavior in a forwards '
'compatible manner.',
FutureWarning, stacklevel=2)
return bool(self._variables)
def __iter__(self):
warnings.warn('iteration over an xarray.Dataset will change in xarray '
'v0.11 to only include data variables, not coordinates. '
'Iterate over the Dataset.variables property instead to '
'preserve existing behavior in a forwards compatible '
'manner.',
FutureWarning, stacklevel=2)
return iter(self._variables)
@property
def nbytes(self):
return sum(v.nbytes for v in self.variables.values())
@property
def loc(self):
"""Attribute for location based indexing. Only supports __getitem__,
and only when the key is a dict of the form {dim: labels}.
"""
return _LocIndexer(self)
def __getitem__(self, key):
"""Access variables or coordinates this dataset as a
:py:class:`~xarray.DataArray`.
Indexing with a list of names will return a new ``Dataset`` object.
"""
if utils.is_dict_like(key):
return self.isel(**key)
if hashable(key):
return self._construct_dataarray(key)
else:
return self._copy_listed(np.asarray(key))
def __setitem__(self, key, value):
"""Add an array to this dataset.
If value is a `DataArray`, call its `select_vars()` method, rename it
to `key` and merge the contents of the resulting dataset into this
dataset.
If value is an `Variable` object (or tuple of form
``(dims, data[, attrs])``), add it to this dataset as a new
variable.
"""
if utils.is_dict_like(key):
raise NotImplementedError('cannot yet use a dictionary as a key '
'to set Dataset values')
self.update({key: value})
def __delitem__(self, key):
"""Remove a variable from this dataset.
"""
del self._variables[key]
self._coord_names.discard(key)
# mutable objects should not be hashable
__hash__ = None
def _all_compat(self, other, compat_str):
"""Helper function for equals and identical"""
# some stores (e.g., scipy) do not seem to preserve order, so don't
# require matching order for equality
def compat(x, y):
return getattr(x, compat_str)(y)
return (self._coord_names == other._coord_names and
utils.dict_equiv(self._variables, other._variables,
compat=compat))
def broadcast_equals(self, other):
"""Two Datasets are broadcast equal if they are equal after
broadcasting all variables against each other.
For example, variables that are scalar in one dataset but non-scalar in
        the other dataset can still be broadcast equal if the non-scalar
variable is a constant.
See Also
--------
Dataset.equals
Dataset.identical
"""
try:
return self._all_compat(other, 'broadcast_equals')
except (TypeError, AttributeError):
return False
def equals(self, other):
"""Two Datasets are equal if they have matching variables and
coordinates, all of which are equal.
Datasets can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for ``Dataset``
does element-wise comparisons (like numpy.ndarrays).
See Also
--------
Dataset.broadcast_equals
Dataset.identical
"""
try:
return self._all_compat(other, 'equals')
except (TypeError, AttributeError):
return False
def identical(self, other):
"""Like equals, but also checks all dataset attributes and the
attributes on all variables and coordinates.
See Also
--------
Dataset.broadcast_equals
Dataset.equals
"""
try:
return (utils.dict_equiv(self.attrs, other.attrs) and
self._all_compat(other, 'identical'))
except (TypeError, AttributeError):
return False
@property
def indexes(self):
"""OrderedDict of pandas.Index objects used for label based indexing
"""
return Indexes(self._variables, self._dims)
@property
def coords(self):
"""Dictionary of xarray.DataArray objects corresponding to coordinate
variables
"""
return DatasetCoordinates(self)
@property
def data_vars(self):
"""Dictionary of xarray.DataArray objects corresponding to data variables
"""
return DataVariables(self)
def set_coords(self, names, inplace=False):
"""Given names of one or more variables, set them as coordinates
Parameters
----------
names : str or list of str
Name(s) of variables in this dataset to convert into coordinates.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
"""
# TODO: allow inserting new coordinates with this method, like
# DataFrame.set_index?
        # nb. check in self._variables, not self.data_vars to ensure that the
# operation is idempotent
if isinstance(names, basestring):
names = [names]
self._assert_all_in_dataset(names)
obj = self if inplace else self.copy()
obj._coord_names.update(names)
return obj
def reset_coords(self, names=None, drop=False, inplace=False):
"""Given names of coordinates, reset them to become variables
Parameters
----------
names : str or list of str, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional
If True, remove coordinates instead of converting them into
variables.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
"""
if names is None:
names = self._coord_names - set(self.dims)
else:
if isinstance(names, basestring):
names = [names]
self._assert_all_in_dataset(names)
bad_coords = set(names) & set(self.dims)
if bad_coords:
raise ValueError(
'cannot remove index coordinates with reset_coords: %s'
% bad_coords)
obj = self if inplace else self.copy()
obj._coord_names.difference_update(names)
if drop:
for name in names:
del obj._variables[name]
return obj
def dump_to_store(self, store, encoder=None, sync=True, encoding=None,
unlimited_dims=None):
"""Store dataset contents to a backends.*DataStore object."""
if encoding is None:
encoding = {}
variables, attrs = conventions.encode_dataset_coordinates(self)
check_encoding = set()
for k, enc in encoding.items():
# no need to shallow copy the variable again; that already happened
# in encode_dataset_coordinates
variables[k].encoding = enc
check_encoding.add(k)
if encoder:
variables, attrs = encoder(variables, attrs)
store.store(variables, attrs, check_encoding,
unlimited_dims=unlimited_dims)
if sync:
store.sync()
def to_netcdf(self, path=None, mode='w', format=None, group=None,
engine=None, encoding=None, unlimited_dims=None):
"""Write dataset contents to a netCDF file.
Parameters
----------
path : str, Path or file-like object, optional
Path to which to save this dataset. File-like objects are only
supported by the scipy engine. If no path is provided, this
function returns the resulting netCDF file as bytes; in this case,
we need to use scipy, which does not support netCDF version 4 (the
default format becomes NETCDF3_64BIT).
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
this location will be overwritten. If mode='a', existing variables
will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', 'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
format='NETCDF4'). The group(s) will be created if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,
'zlib': True}, ...}``
unlimited_dims : sequence of str, optional
Dimension(s) that should be serialized as unlimited dimensions.
By default, no dimensions are treated as unlimited dimensions.
Note that unlimited_dims may also be set via
``dataset.encoding['unlimited_dims']``.
"""
if encoding is None:
encoding = {}
from ..backends.api import to_netcdf
return to_netcdf(self, path, mode, format=format, group=group,
engine=engine, encoding=encoding,
unlimited_dims=unlimited_dims)
def __unicode__(self):
return formatting.dataset_repr(self)
def info(self, buf=None):
"""
Concise summary of a Dataset variables and attributes.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
See Also
--------
pandas.DataFrame.assign
netCDF's ncdump
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(u'xarray.Dataset {')
lines.append(u'dimensions:')
for name, size in self.dims.items():
lines.append(u'\t{name} = {size} ;'.format(name=name, size=size))
lines.append(u'\nvariables:')
for name, da in self.variables.items():
dims = u', '.join(da.dims)
lines.append(u'\t{type} {name}({dims}) ;'.format(
type=da.dtype, name=name, dims=dims))
for k, v in da.attrs.items():
lines.append(u'\t\t{name}:{k} = {v} ;'.format(name=name, k=k,
v=v))
lines.append(u'\n// global attributes:')
for k, v in self.attrs.items():
lines.append(u'\t:{k} = {v} ;'.format(k=k, v=v))
lines.append(u'}')
buf.write(u'\n'.join(lines))
@property
def chunks(self):
"""Block dimensions for this dataset's data or None if it's not a dask
array.
"""
chunks = {}
for v in self.variables.values():
if v.chunks is not None:
for dim, c in zip(v.dims, v.chunks):
if dim in chunks and c != chunks[dim]:
raise ValueError('inconsistent chunks')
chunks[dim] = c
return Frozen(SortedKeysDict(chunks))
def chunk(self, chunks=None, name_prefix='xarray-', token=None,
lock=False):
"""Coerce all arrays in this dataset into dask arrays with the given
chunks.
Non-dask arrays in this dataset will be converted to dask arrays. Dask
arrays will be rechunked to the given chunk sizes.
        If chunks are not provided for one or more dimensions, chunk sizes
        along those dimensions will not be updated; non-dask arrays will be
        converted into dask arrays with a single block.
Parameters
----------
chunks : int or dict, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{'x': 5, 'y': 5}``.
name_prefix : str, optional
Prefix for the name of any new dask arrays.
token : str, optional
Token uniquely identifying this dataset.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already as dask array.
Returns
-------
chunked : xarray.Dataset
"""
try:
from dask.base import tokenize
except ImportError:
import dask # raise the usual error if dask is entirely missing
raise ImportError('xarray requires dask version 0.6 or newer')
if isinstance(chunks, Number):
chunks = dict.fromkeys(self.dims, chunks)
if chunks is not None:
bad_dims = [d for d in chunks if d not in self.dims]
if bad_dims:
raise ValueError('some chunks keys are not dimensions on this '
'object: %s' % bad_dims)
def selkeys(dict_, keys):
if dict_ is None:
return None
return dict((d, dict_[d]) for d in keys if d in dict_)
def maybe_chunk(name, var, chunks):
chunks = selkeys(chunks, var.dims)
if not chunks:
chunks = None
if var.ndim > 0:
token2 = tokenize(name, token if token else var._data)
name2 = '%s%s-%s' % (name_prefix, name, token2)
return var.chunk(chunks, name=name2, lock=lock)
else:
return var
variables = OrderedDict([(k, maybe_chunk(k, v, chunks))
for k, v in self.variables.items()])
return self._replace_vars_and_dims(variables)
def _validate_indexers(self, indexers):
""" Here we make sure
        + indexers have valid keys
        + indexers are of a valid data type
"""
from .dataarray import DataArray
invalid = [k for k in indexers if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
# all indexers should be int, slice, np.ndarrays, or Variable
indexers_list = []
for k, v in iteritems(indexers):
if isinstance(v, integer_types + (slice, Variable)):
pass
elif isinstance(v, DataArray):
v = v.variable
elif isinstance(v, tuple):
v = as_variable(v)
elif isinstance(v, Dataset):
raise TypeError('cannot use a Dataset as an indexer')
else:
v = np.asarray(v)
indexers_list.append((k, v))
return indexers_list
def _get_indexers_coordinates(self, indexers):
""" Extract coordinates from indexers.
Returns an OrderedDict mapping from coordinate name to the
coordinate variable.
        Only coordinates with a name different from any of self.variables will
be attached.
"""
from .dataarray import DataArray
coord_list = []
for k, v in indexers.items():
if isinstance(v, DataArray):
v_coords = v.coords
if v.dtype.kind == 'b':
                    if v.ndim != 1:  # we only support 1-d boolean arrays
raise ValueError(
'{:d}d-boolean array is used for indexing along '
'dimension {!r}, but only 1d boolean arrays are '
'supported.'.format(v.ndim, k))
                    # In the case of a boolean DataArray, make sure its
                    # coordinate is also indexed.
v_coords = v[v.values.nonzero()[0]].coords
coord_list.append({d: v_coords[d].variable for d in v.coords})
# we don't need to call align() explicitly, because merge_variables
# already checks for exact alignment between dimension coordinates
coords = merge_variables(coord_list)
for k in self.dims:
            # make sure there are no conflicts in dimension coordinates
if (k in coords and k in self._variables and
not coords[k].equals(self._variables[k])):
raise IndexError(
'dimension coordinate {!r} conflicts between '
'indexed and indexing objects:\n{}\nvs.\n{}'
.format(k, self._variables[k], coords[k]))
attached_coords = OrderedDict()
for k, v in coords.items(): # silently drop the conflicted variables.
if k not in self._variables:
attached_coords[k] = v
return attached_coords
def isel(self, drop=False, **indexers):
"""Returns a new dataset with each array indexed along the specified
dimension(s).
This method selects values from each array using its `__getitem__`
method, except this method does not require knowing the order of
each array's dimensions.
Parameters
----------
drop : bool, optional
If ``drop=True``, drop coordinates variables indexed by integers
instead of making them scalar.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
            An indexer can be an integer, slice, array-like or DataArray.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this dataset, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.sel
DataArray.isel
"""
indexers_list = self._validate_indexers(indexers)
variables = OrderedDict()
for name, var in iteritems(self._variables):
var_indexers = {k: v for k, v in indexers_list if k in var.dims}
new_var = var.isel(**var_indexers)
if not (drop and name in var_indexers):
variables[name] = new_var
coord_names = set(variables).intersection(self._coord_names)
selected = self._replace_vars_and_dims(variables,
coord_names=coord_names)
# Extract coordinates from indexers
coord_vars = selected._get_indexers_coordinates(indexers)
variables.update(coord_vars)
coord_names = (set(variables)
.intersection(self._coord_names)
.union(coord_vars))
return self._replace_vars_and_dims(variables, coord_names=coord_names)
def sel(self, method=None, tolerance=None, drop=False, **indexers):
"""Returns a new dataset with each array indexed by tick labels
along the specified dimension(s).
In contrast to `Dataset.isel`, indexers for this method should use
labels instead of integers.
Under the hood, this method is powered by using pandas's powerful Index
objects. This makes label based indexing essentially just as fast as
using integer indexing.
It also means this method uses pandas's (well documented) logic for
indexing. This means you can use string shortcuts for datetime indexes
(e.g., '2000-01' to select all values in January 2000). It also means
that slices are treated as inclusive of both the start and stop values,
unlike normal Python indexing.
Parameters
----------
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for inexact matches (requires pandas>=0.16):
* None (default): only exact matches
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Requires pandas>=0.17.
drop : bool, optional
If ``drop=True``, drop coordinates variables in `indexers` instead
of making them scalar.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by scalars, slices or arrays of tick labels. For dimensions with
multi-index, the indexer may also be a dict-like object with keys
matching index level names.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
variable and dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this dataset, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.isel
DataArray.sel
"""
from .dataarray import DataArray
v_indexers = {k: v.variable.data if isinstance(v, DataArray) else v
for k, v in indexers.items()}
pos_indexers, new_indexes = indexing.remap_label_indexers(
self, v_indexers, method=method, tolerance=tolerance
)
# attach indexer's coordinate to pos_indexers
for k, v in indexers.items():
if isinstance(v, Variable):
pos_indexers[k] = Variable(v.dims, pos_indexers[k])
elif isinstance(v, DataArray):
# drop coordinates found in indexers since .sel() already
# ensures alignments
coords = OrderedDict((k, v) for k, v in v._coords.items()
if k not in indexers)
pos_indexers[k] = DataArray(pos_indexers[k],
coords=coords, dims=v.dims)
result = self.isel(drop=drop, **pos_indexers)
return result._replace_indexes(new_indexes)
def isel_points(self, dim='points', **indexers):
# type: (...) -> Dataset
"""Returns a new dataset with each array indexed pointwise along the
specified dimension(s).
This method selects pointwise values from each array and is akin to
the NumPy indexing behavior of `arr[[0, 1], [0, 1]]`, except this
method does not require knowing the order of each array's dimensions.
Parameters
----------
dim : str or DataArray or pandas.Index or other list-like object, optional
Name of the dimension to concatenate along. If dim is provided as a
string, it must be a new dimension name, in which case it is added
along axis=0. If dim is provided as a DataArray or Index or
list-like object, its name, which must not be present in the
dataset, is used as the dimension to concatenate along and the
values are added as a coordinate.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by array-like objects. All indexers must be the same length and
1 dimensional.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers. With
pointwise indexing, the new Dataset will always be a copy of the
original.
See Also
--------
Dataset.sel
Dataset.isel
Dataset.sel_points
DataArray.isel_points
"""
        warnings.warn('Dataset.isel_points is deprecated: use Dataset.isel() '
'instead.', DeprecationWarning, stacklevel=2)
indexer_dims = set(indexers)
def take(variable, slices):
            # Note: remove this helper function once numpy
# supports vindex https://github.com/numpy/numpy/pull/6075
if hasattr(variable.data, 'vindex'):
# Special case for dask backed arrays to use vectorised list indexing
sel = variable.data.vindex[slices]
else:
# Otherwise assume backend is numpy array with 'fancy' indexing
sel = variable.data[slices]
return sel
def relevant_keys(mapping):
return [k for k, v in mapping.items()
if any(d in indexer_dims for d in v.dims)]
coords = relevant_keys(self.coords)
indexers = [(k, np.asarray(v)) for k, v in iteritems(indexers)]
indexers_dict = dict(indexers)
non_indexed_dims = set(self.dims) - indexer_dims
non_indexed_coords = set(self.coords) - set(coords)
# All the indexers should be iterables
# Check that indexers are valid dims, integers, and 1D
for k, v in indexers:
if k not in self.dims:
raise ValueError("dimension %s does not exist" % k)
if v.dtype.kind != 'i':
raise TypeError('Indexers must be integers')
if v.ndim != 1:
raise ValueError('Indexers must be 1 dimensional')
# all the indexers should have the same length
lengths = set(len(v) for k, v in indexers)
if len(lengths) > 1:
raise ValueError('All indexers must be the same length')
# Existing dimensions are not valid choices for the dim argument
if isinstance(dim, basestring):
if dim in self.dims:
# dim is an invalid string
raise ValueError('Existing dimension names are not valid '
'choices for the dim argument in sel_points')
elif hasattr(dim, 'dims'):
# dim is a DataArray or Coordinate
if dim.name in self.dims:
# dim already exists
raise ValueError('Existing dimensions are not valid choices '
'for the dim argument in sel_points')
# Set the new dim_name, and optionally the new dim coordinate
# dim is either an array-like or a string
if not utils.is_scalar(dim):
            # dim is array-like: get its name (or default to 'points') and coerce to a variable
dim_name = 'points' if not hasattr(dim, 'name') else dim.name
dim_coord = as_variable(dim, name=dim_name)
else:
# dim is a string
dim_name = dim
dim_coord = None
reordered = self.transpose(*(list(indexer_dims) + list(non_indexed_dims)))
variables = OrderedDict()
for name, var in reordered.variables.items():
if name in indexers_dict or any(d in indexer_dims for d in var.dims):
# slice if var is an indexer or depends on an indexed dim
slc = [indexers_dict[k]
if k in indexers_dict
else slice(None) for k in var.dims]
var_dims = [dim_name] + [d for d in var.dims
if d in non_indexed_dims]
selection = take(var, tuple(slc))
var_subset = type(var)(var_dims, selection, var.attrs)
variables[name] = var_subset
else:
# If not indexed just add it back to variables or coordinates
variables[name] = var
coord_names = (set(coords) & set(variables)) | non_indexed_coords
dset = self._replace_vars_and_dims(variables, coord_names=coord_names)
# Add the dim coord to the new dset. Must be done after creation
        # because _replace_vars_and_dims can only access existing coords,
# not add new ones
if dim_coord is not None:
dset.coords[dim_name] = dim_coord
return dset
def sel_points(self, dim='points', method=None, tolerance=None,
**indexers):
"""Returns a new dataset with each array indexed pointwise by tick
labels along the specified dimension(s).
In contrast to `Dataset.isel_points`, indexers for this method should
use labels instead of integers.
In contrast to `Dataset.sel`, this method selects points along the
diagonal of multi-dimensional arrays, not the intersection.
Parameters
----------
dim : str or DataArray or pandas.Index or other list-like object, optional
Name of the dimension to concatenate along. If dim is provided as a
string, it must be a new dimension name, in which case it is added
along axis=0. If dim is provided as a DataArray or Index or
list-like object, its name, which must not be present in the
dataset, is used as the dimension to concatenate along and the
values are added as a coordinate.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for inexact matches (requires pandas>=0.16):
* None (default): only exact matches
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Requires pandas>=0.17.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by array-like objects. All indexers must be the same length and
1 dimensional.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers. With
pointwise indexing, the new Dataset will always be a copy of the
original.
See Also
--------
Dataset.sel
Dataset.isel
Dataset.isel_points
DataArray.sel_points
"""
        warnings.warn('Dataset.sel_points is deprecated: use Dataset.sel() '
'instead.', DeprecationWarning, stacklevel=2)
pos_indexers, _ = indexing.remap_label_indexers(
self, indexers, method=method, tolerance=tolerance
)
return self.isel_points(dim=dim, **pos_indexers)
def reindex_like(self, other, method=None, tolerance=None, copy=True):
"""Conform this object onto the indexes of another object, filling
in missing values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to pandas.Index objects, which provides coordinates upon
which to index the variables in this dataset. The indexes on this
other object need not be the same as the indexes on this
dataset. Any mis-matched index values will be filled in with
NaN, and any mis-matched dimension names will simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values from other not found in this
dataset:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Requires pandas>=0.17.
copy : bool, optional
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but coordinates from the
other object.
See Also
--------
Dataset.reindex
align
"""
indexers = alignment.reindex_like_indexers(self, other)
return self.reindex(method=method, copy=copy, tolerance=tolerance,
**indexers)
def reindex(self, indexers=None, method=None, tolerance=None, copy=True,
**kw_indexers):
"""Conform this object onto a new set of indexes, filling in
missing values with NaN.
Parameters
----------
        indexers : dict, optional
Dictionary with keys given by dimension names and values given by
            arrays of coordinate tick labels. Any mis-matched coordinate values
will be filled in with NaN, and any mis-matched dimension names will
simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Requires pandas>=0.17.
copy : bool, optional
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
**kw_indexers : optional
Keyword arguments in the same form as ``indexers``.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but replaced coordinates.
See Also
--------
Dataset.reindex_like
align
pandas.Index.get_indexer
"""
indexers = utils.combine_pos_and_kw_args(indexers, kw_indexers,
'reindex')
bad_dims = [d for d in indexers if d not in self.dims]
if bad_dims:
raise ValueError('invalid reindex dimensions: %s' % bad_dims)
variables = alignment.reindex_variables(
self.variables, self.sizes, self.indexes, indexers, method,
tolerance, copy=copy)
coord_names = set(self._coord_names)
coord_names.update(indexers)
return self._replace_vars_and_dims(variables, coord_names)
def rename(self, name_dict, inplace=False):
"""Returns a new object with renamed variables and dimensions.
Parameters
----------
name_dict : dict-like
Dictionary whose keys are current variable or dimension names and
whose values are the desired names.
inplace : bool, optional
If True, rename variables and dimensions in-place. Otherwise,
return a new dataset object.
Returns
-------
renamed : Dataset
Dataset with renamed variables and dimensions.
See Also
--------
Dataset.swap_dims
DataArray.rename
"""
for k, v in name_dict.items():
if k not in self and k not in self.dims:
raise ValueError("cannot rename %r because it is not a "
"variable or dimension in this dataset" % k)
variables = OrderedDict()
coord_names = set()
for k, v in iteritems(self._variables):
name = name_dict.get(k, k)
dims = tuple(name_dict.get(dim, dim) for dim in v.dims)
var = v.copy(deep=False)
var.dims = dims
if name in variables:
raise ValueError('the new name %r conflicts' % (name,))
variables[name] = var
if k in self._coord_names:
coord_names.add(name)
dims = OrderedDict((name_dict.get(k, k), v)
for k, v in self.dims.items())
return self._replace_vars_and_dims(variables, coord_names, dims=dims,
inplace=inplace)
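# Minimal usage sketch for ``rename``; the names are illustrative:
#     >>> import xarray as xr
#     >>> ds = xr.Dataset({'temp': ('x', [1, 2, 3])})
#     >>> ds.rename({'temp': 'temperature', 'x': 'lon'})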
def swap_dims(self, dims_dict, inplace=False):
"""Returns a new object with swapped dimensions.
Parameters
----------
dims_dict : dict-like
Dictionary whose keys are current dimension names and whose values
are new names. Each value must already be a variable in the
dataset.
inplace : bool, optional
If True, swap dimensions in-place. Otherwise, return a new dataset
object.
Returns
-------
renamed : Dataset
Dataset with swapped dimensions.
See Also
--------
Dataset.rename
DataArray.swap_dims
"""
for k, v in dims_dict.items():
if k not in self.dims:
raise ValueError('cannot swap from dimension %r because it is '
'not an existing dimension' % k)
if self.variables[v].dims != (k,):
raise ValueError('replacement dimension %r is not a 1D '
'variable along the old dimension %r'
% (v, k))
result_dims = set(dims_dict.get(dim, dim) for dim in self.dims)
variables = OrderedDict()
coord_names = self._coord_names.copy()
coord_names.update(dims_dict.values())
for k, v in iteritems(self.variables):
dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)
if k in result_dims:
var = v.to_index_variable()
else:
var = v.to_base_variable()
var.dims = dims
variables[k] = var
return self._replace_vars_and_dims(variables, coord_names,
inplace=inplace)
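# Minimal usage sketch for ``swap_dims``, assuming an illustrative 'lon' coordinate:
#     >>> import xarray as xr
#     >>> ds = xr.Dataset({'foo': ('x', [10, 20, 30])},
#     ...                 coords={'x': [0, 1, 2], 'lon': ('x', [30.0, 30.5, 31.0])})
#     >>> ds.swap_dims({'x': 'lon'})  # 'lon' becomes the dimension coordinate; 'x' stays as a coordinate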
def expand_dims(self, dim, axis=None):
"""Return a new object with an additional axis (or axes) inserted at the
corresponding position in the array shape.
If dim is already a scalar coordinate, it will be promoted to a 1D
coordinate consisting of a single value.
Parameters
----------
dim : str or sequence of str
Dimensions to include on the new variable. New dimensions are
inserted with length 1.
axis : integer, list (or tuple) of integers, or None
Axis position(s) where new axis is to be inserted (position(s) on
the result array). If a list (or tuple) of integers is passed,
multiple axes are inserted. In this case, dim arguments should be
the same length list. If axis=None is passed, all the axes will
be inserted to the start of the result array.
Returns
-------
expanded : same type as caller
This object, but with an additional dimension(s).
"""
if isinstance(dim, int):
raise ValueError('dim should be str or sequence of strs or dict')
if isinstance(dim, basestring):
dim = [dim]
if axis is not None and not isinstance(axis, (list, tuple)):
axis = [axis]
if axis is None:
axis = list(range(len(dim)))
if len(dim) != len(axis):
raise ValueError('lengths of dim and axis should be identical.')
for d in dim:
if d in self.dims:
raise ValueError(
'Dimension {dim} already exists.'.format(dim=d))
if (d in self._variables and
not utils.is_scalar(self._variables[d])):
raise ValueError(
'{dim} already exists as coordinate or'
' variable name.'.format(dim=d))
if len(dim) != len(set(dim)):
raise ValueError('dims should not contain duplicate values.')
variables = OrderedDict()
for k, v in iteritems(self._variables):
if k not in dim:
if k in self._coord_names: # Do not change coordinates
variables[k] = v
else:
result_ndim = len(v.dims) + len(axis)
for a in axis:
if a < -result_ndim or result_ndim - 1 < a:
raise IndexError(
'Axis {a} is out of bounds of the expanded'
' dimension size {dim}.'.format(
a=a, v=k, dim=result_ndim))
axis_pos = [a if a >= 0 else result_ndim + a
for a in axis]
if len(axis_pos) != len(set(axis_pos)):
raise ValueError('axis should not contain duplicate'
' values.')
# We need to sort them to make sure `axis` equals to the
# axis positions of the result array.
zip_axis_dim = sorted(zip(axis_pos, dim))
all_dims = list(v.dims)
for a, d in zip_axis_dim:
all_dims.insert(a, d)
variables[k] = v.set_dims(all_dims)
else:
# If dims includes a label of a non-dimension coordinate,
# it will be promoted to a 1D coordinate with a single value.
variables[k] = v.set_dims(k)
return self._replace_vars_and_dims(variables, self._coord_names)
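# Minimal usage sketch for ``expand_dims``; the 'time' dimension is illustrative:
#     >>> import xarray as xr
#     >>> ds = xr.Dataset({'foo': ('x', [1, 2, 3])})
#     >>> ds.expand_dims('time')               # 'foo' gains a leading length-1 'time' dimension
#     >>> ds.expand_dims(['time'], axis=[1])   # insert the new axis after 'x' instead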
def set_index(self, append=False, inplace=False, **indexes):
"""Set Dataset (multi-)indexes using one or more existing coordinates or
variables.
Parameters
----------
append : bool, optional
If True, append the supplied index(es) to the existing index(es).
Otherwise replace the existing index(es) (default).
inplace : bool, optional
If True, set new index(es) in-place. Otherwise, return a new
Dataset object.
**indexes : {dim: index, ...}
Keyword arguments with names matching dimensions and values given
by (lists of) the names of existing coordinates or variables to set
as new (multi-)index.
Returns
-------
obj : Dataset
Another dataset, with this dataset's data but replaced coordinates.
See Also
--------
Dataset.reset_index
"""
variables, coord_names = merge_indexes(indexes, self._variables,
self._coord_names,
append=append)
return self._replace_vars_and_dims(variables, coord_names=coord_names,
inplace=inplace)
def reset_index(self, dims_or_levels, drop=False, inplace=False):
"""Reset the specified index(es) or multi-index level(s).
Parameters
----------
dims_or_levels : str or list
Name(s) of the dimension(s) and/or multi-index level(s) that will
be reset.
drop : bool, optional
If True, remove the specified indexes and/or multi-index levels
instead of extracting them as new coordinates (default: False).
inplace : bool, optional
If True, modify the dataset in-place. Otherwise, return a new
Dataset object.
Returns
-------
obj : Dataset
Another dataset, with this dataset's data but replaced coordinates.
See Also
--------
Dataset.set_index
"""
variables, coord_names = split_indexes(dims_or_levels, self._variables,
self._coord_names,
self._level_coords, drop=drop)
return self._replace_vars_and_dims(variables, coord_names=coord_names,
inplace=inplace)
def reorder_levels(self, inplace=False, **dim_order):
"""Rearrange index levels using input order.
Parameters
----------
inplace : bool, optional
If True, modify the dataset in-place. Otherwise, return a new
DataArray object.
**dim_order : optional
Keyword arguments with names matching dimensions and values given
by lists representing new level orders. Every given dimension
must have a multi-index.
Returns
-------
obj : Dataset
Another dataset, with this dataset's data but replaced
coordinates.
"""
replace_variables = {}
for dim, order in dim_order.items():
coord = self._variables[dim]
index = coord.to_index()
if not isinstance(index, pd.MultiIndex):
raise ValueError("coordinate %r has no MultiIndex" % dim)
replace_variables[dim] = IndexVariable(coord.dims,
index.reorder_levels(order))
variables = self._variables.copy()
variables.update(replace_variables)
return self._replace_vars_and_dims(variables, inplace=inplace)
def _stack_once(self, dims, new_dim):
variables = OrderedDict()
for name, var in self.variables.items():
if name not in dims:
if any(d in var.dims for d in dims):
add_dims = [d for d in dims if d not in var.dims]
vdims = list(var.dims) + add_dims
shape = [self.dims[d] for d in vdims]
exp_var = var.set_dims(vdims, shape)
stacked_var = exp_var.stack(**{new_dim: dims})
variables[name] = stacked_var
else:
variables[name] = var.copy(deep=False)
# consider dropping levels that are unused?
levels = [self.get_index(dim) for dim in dims]
if hasattr(pd, 'RangeIndex'):
# RangeIndex levels in a MultiIndex are broken for appending in
# pandas before v0.19.0
levels = [pd.Int64Index(level)
if isinstance(level, pd.RangeIndex)
else level
for level in levels]
idx = utils.multiindex_from_product_levels(levels, names=dims)
variables[new_dim] = IndexVariable(new_dim, idx)
coord_names = set(self._coord_names) - set(dims) | set([new_dim])
return self._replace_vars_and_dims(variables, coord_names)
def stack(self, **dimensions):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the corresponding
coordinate variables will be combined into a MultiIndex.
Parameters
----------
**dimensions : keyword arguments of the form new_name=(dim1, dim2, ...)
Names of new dimensions, and the existing dimensions that they
replace.
Returns
-------
stacked : Dataset
Dataset with stacked data.
See also
--------
Dataset.unstack
"""
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
def unstack(self, dim):
"""
Unstack an existing dimension corresponding to a MultiIndex into
multiple new dimensions.
New dimensions will be added at the end.
Parameters
----------
dim : str
Name of the existing dimension to unstack.
Returns
-------
unstacked : Dataset
Dataset with unstacked data.
See also
--------
Dataset.stack
"""
if dim not in self.dims:
raise ValueError('invalid dimension: %s' % dim)
index = self.get_index(dim)
if not isinstance(index, pd.MultiIndex):
raise ValueError('cannot unstack a dimension that does not have '
'a MultiIndex')
full_idx = pd.MultiIndex.from_product(index.levels, names=index.names)
obj = self.reindex(copy=False, **{dim: full_idx})
new_dim_names = index.names
new_dim_sizes = [lev.size for lev in index.levels]
variables = OrderedDict()
for name, var in obj.variables.items():
if name != dim:
if dim in var.dims:
new_dims = OrderedDict(zip(new_dim_names, new_dim_sizes))
variables[name] = var.unstack(**{dim: new_dims})
else:
variables[name] = var
for name, lev in zip(new_dim_names, index.levels):
variables[name] = IndexVariable(name, lev)
coord_names = set(self._coord_names) - set([dim]) | set(new_dim_names)
return self._replace_vars_and_dims(variables, coord_names)
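# Minimal sketch of a ``stack``/``unstack`` round trip on a toy dataset:
#     >>> import xarray as xr
#     >>> ds = xr.Dataset({'foo': (('x', 'y'), [[0, 1], [2, 3]])},
#     ...                 coords={'x': ['a', 'b'], 'y': [0, 1]})
#     >>> stacked = ds.stack(z=('x', 'y'))   # 'z' is a MultiIndex over (x, y)
#     >>> stacked.unstack('z')               # recovers the original two dimensions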
def update(self, other, inplace=True):
"""Update this dataset's variables with those from another dataset.
Parameters
----------
other : Dataset or castable to Dataset
Dataset or variables with which to update this dataset.
inplace : bool, optional
If True, merge the other dataset into this dataset in-place.
Otherwise, return a new dataset object.
Returns
-------
updated : Dataset
Updated dataset.
Raises
------
ValueError
If any dimensions would have inconsistent sizes in the updated
dataset.
"""
variables, coord_names, dims = dataset_update_method(self, other)
return self._replace_vars_and_dims(variables, coord_names, dims,
inplace=inplace)
def merge(self, other, inplace=False, overwrite_vars=frozenset(),
compat='no_conflicts', join='outer'):
"""Merge the arrays of two datasets into a single dataset.
This method generally does not allow for overriding data, with the exception
of attributes, which are ignored on the second dataset. Variables with
the same name are checked for conflicts via the equals or identical
methods.
Parameters
----------
other : Dataset or castable to Dataset
Dataset or variables to merge with this dataset.
inplace : bool, optional
If True, merge the other dataset into this dataset in-place.
Otherwise, return a new dataset object.
overwrite_vars : str or sequence, optional
If provided, update variables of these name(s) without checking for
conflicts in this dataset.
compat : {'broadcast_equals', 'equals', 'identical',
'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining ``self`` and ``other`` along shared dimensions:
- 'outer': use the union of the indexes
- 'inner': use the intersection of the indexes
- 'left': use indexes from ``self``
- 'right': use indexes from ``other``
- 'exact': error instead of aligning non-equal indexes
Returns
-------
merged : Dataset
Merged dataset.
Raises
------
MergeError
If any variables conflict (see ``compat``).
"""
variables, coord_names, dims = dataset_merge_method(
self, other, overwrite_vars=overwrite_vars, compat=compat,
join=join)
return self._replace_vars_and_dims(variables, coord_names, dims,
inplace=inplace)
def _assert_all_in_dataset(self, names, virtual_okay=False):
bad_names = set(names) - set(self._variables)
if virtual_okay:
bad_names -= self.virtual_variables
if bad_names:
raise ValueError('One or more of the specified variables '
'cannot be found in this dataset')
def drop(self, labels, dim=None):
"""Drop variables or index labels from this dataset.
Parameters
----------
labels : scalar or list of scalars
Name(s) of variables or index labels to drop.
dim : None or str, optional
Dimension along which to drop index labels. By default (if
``dim is None``), drops variables rather than index labels.
Returns
-------
dropped : Dataset
"""
if utils.is_scalar(labels):
labels = [labels]
if dim is None:
return self._drop_vars(labels)
else:
try:
index = self.indexes[dim]
except KeyError:
raise ValueError(
'dimension %r does not have coordinate labels' % dim)
new_index = index.drop(labels)
return self.loc[{dim: new_index}]
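# Minimal usage sketch for ``drop``; names and labels are illustrative:
#     >>> import xarray as xr
#     >>> ds = xr.Dataset({'foo': ('x', [1, 2, 3]), 'bar': 4.0},
#     ...                 coords={'x': [10, 20, 30]})
#     >>> ds.drop('bar')              # drop a variable by name
#     >>> ds.drop([10, 30], dim='x')  # drop index labels along 'x'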
def _drop_vars(self, names):
self._assert_all_in_dataset(names)
drop = set(names)
variables = OrderedDict((k, v) for k, v in iteritems(self._variables)
if k not in drop)
coord_names = set(k for k in self._coord_names if k in variables)
return self._replace_vars_and_dims(variables, coord_names)
def transpose(self, *dims):
"""Return a new Dataset object with all array dimensions transposed.
Although the order of dimensions on each array will change, the dataset
dimensions themselves will remain in fixed (sorted) order.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions on each array. Otherwise,
reorder the dimensions to this order.
Returns
-------
transposed : Dataset
Each array in the dataset (including coordinates) will be
transposed to the given order.
Notes
-----
Although this operation returns a view of each array's data, it
is not lazy -- the data will be fully loaded into memory.
See Also
--------
numpy.transpose
DataArray.transpose
"""
if dims:
if set(dims) ^ set(self.dims):
raise ValueError('arguments to transpose (%s) must be '
'permuted dataset dimensions (%s)'
% (dims, tuple(self.dims)))
ds = self.copy()
for name, var in iteritems(self._variables):
var_dims = tuple(dim for dim in dims if dim in var.dims)
ds._variables[name] = var.transpose(*var_dims)
return ds
@property
def T(self):
warnings.warn('xarray.Dataset.T has been deprecated as an alias for '
'`.transpose()`. It will be removed in xarray v0.11.',
FutureWarning, stacklevel=2)
return self.transpose()
def dropna(self, dim, how='any', thresh=None, subset=None):
"""Returns a new dataset with dropped labels for missing values along
the provided dimension.
Parameters
----------
dim : str
Dimension along which to drop missing values. Dropping along
multiple dimensions simultaneously is not yet supported.
how : {'any', 'all'}, optional
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
If supplied, require this many non-NA values.
subset : sequence, optional
Subset of variables to check for missing values. By default, all
variables in the dataset are checked.
Returns
-------
Dataset
"""
# TODO: consider supporting multiple dimensions? Or not, given that
# there are some ugly edge cases, e.g., pandas's dropna differs
# depending on the order of the supplied axes.
if dim not in self.dims:
raise ValueError('%s must be a single dataset dimension' % dim)
if subset is None:
subset = list(self.data_vars)
count = np.zeros(self.dims[dim], dtype=np.int64)
size = 0
for k in subset:
array = self._variables[k]
if dim in array.dims:
dims = [d for d in array.dims if d != dim]
count += array.count(dims)
size += np.prod([self.dims[d] for d in dims])
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == size
elif how == 'all':
mask = count > 0
elif how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
return self.isel(**{dim: mask})
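# Minimal usage sketch for ``dropna`` on a toy one-variable dataset:
#     >>> import numpy as np
#     >>> import xarray as xr
#     >>> ds = xr.Dataset({'foo': ('x', [0.0, np.nan, 2.0, np.nan])})
#     >>> ds.dropna('x')             # drops the two labels where 'foo' is NaN
#     >>> ds.dropna('x', thresh=1)   # keep labels with at least one non-NaN value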
def fillna(self, value):
"""Fill missing values in this object.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : scalar, ndarray, DataArray, dict or Dataset
Used to fill all matching missing values in this dataset's data
variables. Scalar, ndarray or DataArray arguments are used to
fill all data with aligned coordinates (for DataArrays).
Dictionaries or datasets match data variables and then align
coordinates if necessary.
Returns
-------
Dataset
"""
if utils.is_dict_like(value):
value_keys = getattr(value, 'data_vars', value).keys()
if not set(value_keys) <= set(self.data_vars.keys()):
raise ValueError('all variables in the argument to `fillna` '
'must be contained in the original dataset')
out = ops.fillna(self, value)
return out
def combine_first(self, other):
"""Combine two Datasets, default to data_vars of self.
The new coordinates follow the normal broadcasting and alignment rules
of ``join='outer'``. Vacant cells in the expanded coordinates are
filled with np.nan.
Parameters
----------
other : DataArray
Used to fill all matching missing values in this array.
Returns
-------
DataArray
"""
out = ops.fillna(self, other, join="outer", dataset_join="outer")
return out
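# Minimal sketch contrasting ``fillna`` and ``combine_first`` on toy datasets:
#     >>> import numpy as np
#     >>> import xarray as xr
#     >>> a = xr.Dataset({'foo': ('x', [1.0, np.nan])}, coords={'x': [0, 1]})
#     >>> b = xr.Dataset({'foo': ('x', [9.0, 9.0, 9.0])}, coords={'x': [0, 1, 2]})
#     >>> a.fillna(0.0)        # NaN at x=1 becomes 0.0; result keeps a's coordinates (join='left')
#     >>> a.combine_first(b)   # x=1 is filled from b and x=2 is taken from b (join='outer')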
def reduce(self, func, dim=None, keep_attrs=False, numeric_only=False,
allow_lazy=False, **kwargs):
"""Reduce this dataset by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`f(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`. By default `func` is
applied over all dimensions.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
numeric_only : bool, optional
If True, only apply ``func`` to variables with a numeric dtype.
**kwargs : dict
Additional keyword arguments passed on to ``func``.
Returns
-------
reduced : Dataset
Dataset with this object's DataArrays replaced with new DataArrays
of summarized data and the indicated dimension(s) removed.
"""
if isinstance(dim, basestring):
dims = set([dim])
elif dim is None:
dims = set(self.dims)
else:
dims = set(dim)
missing_dimensions = [dim for dim in dims if dim not in self.dims]
if missing_dimensions:
raise ValueError('Dataset does not contain the dimensions: %s'
% missing_dimensions)
variables = OrderedDict()
for name, var in iteritems(self._variables):
reduce_dims = [dim for dim in var.dims if dim in dims]
if reduce_dims or not var.dims:
if name not in self.coords:
if (not numeric_only or
np.issubdtype(var.dtype, np.number) or
(var.dtype == np.bool_)):
if len(reduce_dims) == 1:
# unpack dimensions for the benefit of functions
# like np.argmin which can't handle tuple arguments
reduce_dims, = reduce_dims
elif len(reduce_dims) == var.ndim:
# prefer to aggregate over axis=None rather than
# axis=(0, 1) if they will be equivalent, because
# the former is often more efficient
reduce_dims = None
variables[name] = var.reduce(func, dim=reduce_dims,
keep_attrs=keep_attrs,
allow_lazy=allow_lazy,
**kwargs)
else:
variables[name] = var
coord_names = set(k for k in self.coords if k in variables)
attrs = self.attrs if keep_attrs else None
return self._replace_vars_and_dims(variables, coord_names, attrs=attrs)
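# Minimal usage sketch for ``reduce``; ``np.mean`` stands in for any axis-aware reducer:
#     >>> import numpy as np
#     >>> import xarray as xr
#     >>> ds = xr.Dataset({'foo': (('x', 'y'), [[1.0, 2.0], [3.0, 4.0]])})
#     >>> ds.reduce(np.mean, dim='y')   # equivalent to ds.mean('y'); removes the 'y' dimension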
def apply(self, func, keep_attrs=False, args=(), **kwargs):
"""Apply a function over the data variables in this dataset.
Parameters
----------
func : function
Function which can be called in the form `f(x, **kwargs)` to
transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new object will
be returned without attributes.
args : tuple, optional
Positional arguments passed on to `func`.
**kwargs : dict
Keyword arguments passed on to `func`.
Returns
-------
applied : Dataset
Resulting dataset from applying ``func`` over each data variable.
Examples
--------
>>> da = xr.DataArray(np.random.randn(2, 3))
>>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])})
>>> ds
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 -0.3751 -1.951 -1.945 0.2948 0.711 -0.3948
bar (x) int64 -1 2
>>> ds.apply(np.fabs)
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948
bar (x) float64 1.0 2.0
"""
variables = OrderedDict(
(k, maybe_wrap_array(v, func(v, *args, **kwargs)))
for k, v in iteritems(self.data_vars))
attrs = self.attrs if keep_attrs else None
return type(self)(variables, attrs=attrs)
def assign(self, **kwargs):
"""Assign new data variables to a Dataset, returning a new object
with all the original variables in addition to the new ones.
Parameters
----------
kwargs : keyword, value pairs
keywords are the variables names. If the values are callable, they
are computed on the Dataset and assigned to new data variables. If
the values are not callable, (e.g. a DataArray, scalar, or array),
they are simply assigned.
Returns
-------
ds : Dataset
A new Dataset with the new variables in addition to all the
existing variables.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your arguments may not
be preserved, and so the order of the new variables is not well
defined. Assigning multiple variables within the same ``assign`` is
possible, but you cannot reference other variables created within the
same ``assign`` call.
See Also
--------
pandas.DataFrame.assign
"""
data = self.copy()
# do all calculations first...
results = data._calc_assign_results(kwargs)
# ... and then assign
data.update(results)
return data
def to_array(self, dim='variable', name=None):
"""Convert this dataset into an xarray.DataArray
The data variables of this dataset will be broadcast against each other
and stacked along the first axis of the new array. All coordinates of
this dataset will remain coordinates.
Parameters
----------
dim : str, optional
Name of the new dimension.
name : str, optional
Name of the new data array.
Returns
-------
array : xarray.DataArray
"""
from .dataarray import DataArray
data_vars = [self.variables[k] for k in self.data_vars]
broadcast_vars = broadcast_variables(*data_vars)
data = duck_array_ops.stack([b.data for b in broadcast_vars], axis=0)
coords = dict(self.coords)
coords[dim] = list(self.data_vars)
dims = (dim,) + broadcast_vars[0].dims
return DataArray(data, coords, dims, attrs=self.attrs, name=name)
def _to_dataframe(self, ordered_dims):
columns = [k for k in self.variables if k not in self.dims]
data = [self._variables[k].set_dims(ordered_dims).values.reshape(-1)
for k in columns]
index = self.coords.to_index(ordered_dims)
return pd.DataFrame(OrderedDict(zip(columns, data)), index=index)
def to_dataframe(self):
"""Convert this dataset into a pandas.DataFrame.
Non-index variables in this dataset form the columns of the
DataFrame. The DataFrame is indexed by the Cartesian product of
this dataset's indices.
"""
return self._to_dataframe(self.dims)
@classmethod
def from_dataframe(cls, dataframe):
"""Convert a pandas.DataFrame into an xarray.Dataset
Each column will be converted into an independent variable in the
Dataset. If the dataframe's index is a MultiIndex, it will be expanded
into a tensor product of one-dimensional indices (filling in missing
values with NaN). This method will produce a Dataset very similar to
that on which the 'to_dataframe' method was called, except with
possibly redundant dimensions (since all dataset variables will have
the same dimensionality).
"""
# TODO: Add an option to remove dimensions along which the variables
# are constant, to enable consistent serialization to/from a dataframe,
# even if some variables have different dimensionality.
if not dataframe.columns.is_unique:
raise ValueError(
'cannot convert DataFrame with non-unique columns')
idx = dataframe.index
obj = cls()
if hasattr(idx, 'levels'):
# it's a multi-index
# expand the DataFrame to include the product of all levels
full_idx = pd.MultiIndex.from_product(idx.levels, names=idx.names)
dataframe = dataframe.reindex(full_idx)
dims = [name if name is not None else 'level_%i' % n
for n, name in enumerate(idx.names)]
for dim, lev in zip(dims, idx.levels):
obj[dim] = (dim, lev)
shape = [lev.size for lev in idx.levels]
else:
dims = (idx.name if idx.name is not None else 'index',)
obj[dims[0]] = (dims, idx)
shape = -1
for name, series in iteritems(dataframe):
data = np.asarray(series).reshape(shape)
obj[name] = (dims, data)
return obj
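# Minimal sketch of a ``to_dataframe``/``from_dataframe`` round trip:
#     >>> import xarray as xr
#     >>> ds = xr.Dataset({'foo': (('x', 'y'), [[1, 2], [3, 4]])},
#     ...                 coords={'x': [10, 20], 'y': ['a', 'b']})
#     >>> df = ds.to_dataframe()          # DataFrame indexed by the (x, y) MultiIndex
#     >>> xr.Dataset.from_dataframe(df)   # rebuilds an equivalent Dataset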
def to_dask_dataframe(self, dim_order=None, set_index=False):
"""
Convert this dataset into a dask.dataframe.DataFrame.
The dimensions, coordinates and data variables in this dataset form
the columns of the DataFrame.
Parameters
----------
dim_order : list, optional
Hierarchical dimension order for the resulting dataframe. All
arrays are transposed to this order and then written out as flat
vectors in contiguous order, so the last dimension in this list
will be contiguous in the resulting DataFrame. This has a major
influence on which operations are efficient on the resulting dask
dataframe.
If provided, must include all dimensions on this dataset. By
default, dimensions are sorted alphabetically.
set_index : bool, optional
If set_index=True, the dask DataFrame is indexed by this dataset's
coordinate. Since dask DataFrames do not support multi-indexes,
set_index only works if the dataset contains a single dimension.
Returns
-------
dask.dataframe.DataFrame
"""
import dask.array as da
import dask.dataframe as dd
if dim_order is None:
dim_order = list(self.dims)
elif set(dim_order) != set(self.dims):
raise ValueError(
'dim_order {} does not match the set of dimensions on this '
'Dataset: {}'.format(dim_order, list(self.dims)))
ordered_dims = OrderedDict((k, self.dims[k]) for k in dim_order)
columns = list(ordered_dims)
columns.extend(k for k in self.coords if k not in self.dims)
columns.extend(self.data_vars)
series_list = []
for name in columns:
try:
var = self.variables[name]
except KeyError:
# dimension without a matching coordinate
size = self.dims[name]
data = da.arange(size, chunks=size, dtype=np.int64)
var = Variable((name,), data)
# IndexVariable objects have a dummy .chunk() method
if isinstance(var, IndexVariable):
var = var.to_base_variable()
dask_array = var.set_dims(ordered_dims).chunk(self.chunks).data
series = dd.from_array(dask_array.reshape(-1), columns=[name])
series_list.append(series)
df = dd.concat(series_list, axis=1)
if set_index:
if len(dim_order) == 1:
(dim,) = dim_order
df = df.set_index(dim)
else:
# triggers an error about multi-indexes, even if only one
# dimension is passed
df = df.set_index(dim_order)
return df
def to_dict(self):
"""
Convert this dataset to a dictionary following xarray naming
conventions.
Converts all variables and attributes to native Python objects.
Useful for converting to json. To avoid datetime incompatibility
use the decode_times=False kwarg in xarray.open_dataset.
See also
--------
Dataset.from_dict
"""
d = {'coords': {}, 'attrs': decode_numpy_dict_values(self.attrs),
'dims': dict(self.dims), 'data_vars': {}}
for k in self.coords:
data = ensure_us_time_resolution(self[k].values).tolist()
d['coords'].update({
k: {'data': data,
'dims': self[k].dims,
'attrs': decode_numpy_dict_values(self[k].attrs)}})
for k in self.data_vars:
data = ensure_us_time_resolution(self[k].values).tolist()
d['data_vars'].update({
k: {'data': data,
'dims': self[k].dims,
'attrs': decode_numpy_dict_values(self[k].attrs)}})
return d
@classmethod
def from_dict(cls, d):
"""
Convert a dictionary into an xarray.Dataset.
Input dict can take several forms::
d = {'t': {'dims': ('t'), 'data': t},
'a': {'dims': ('t'), 'data': x},
'b': {'dims': ('t'), 'data': y}}
d = {'coords': {'t': {'dims': 't', 'data': t,
'attrs': {'units':'s'}}},
'attrs': {'title': 'air temperature'},
'dims': 't',
'data_vars': {'a': {'dims': 't', 'data': x, },
'b': {'dims': 't', 'data': y}}}
where 't' is the name of the dimension, 'a' and 'b' are names of data
variables and t, x, and y are lists, numpy.arrays or pandas objects.
Parameters
----------
d : dict, with a minimum structure of {'var_0': {'dims': [..], \
'data': [..]}, \
...}
Returns
-------
obj : xarray.Dataset
See also
--------
Dataset.to_dict
DataArray.from_dict
"""
if not set(['coords', 'data_vars']).issubset(set(d)):
variables = d.items()
else:
import itertools
variables = itertools.chain(d.get('coords', {}).items(),
d.get('data_vars', {}).items())
try:
variable_dict = OrderedDict([(k, (v['dims'],
v['data'],
v.get('attrs'))) for
k, v in variables])
except KeyError as e:
raise ValueError(
"cannot convert dict without the key "
"'{dims_data}'".format(dims_data=str(e.args[0])))
obj = cls(variable_dict)
# what if coords aren't dims?
coords = set(d.get('coords', {})) - set(d.get('dims', {}))
obj = obj.set_coords(coords)
obj.attrs.update(d.get('attrs', {}))
return obj
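# Minimal sketch of a ``to_dict``/``from_dict`` round trip:
#     >>> import xarray as xr
#     >>> ds = xr.Dataset({'a': ('t', [1, 2, 3])}, coords={'t': [0, 1, 2]})
#     >>> d = ds.to_dict()          # plain nested dict, e.g. for JSON serialization
#     >>> xr.Dataset.from_dict(d)   # rebuilds an equivalent Dataset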
@staticmethod
def _unary_op(f, keep_attrs=False):
@functools.wraps(f)
def func(self, *args, **kwargs):
ds = self.coords.to_dataset()
for k in self.data_vars:
ds._variables[k] = f(self._variables[k], *args, **kwargs)
if keep_attrs:
ds._attrs = self._attrs
return ds
return func
@staticmethod
def _binary_op(f, reflexive=False, join=None):
@functools.wraps(f)
def func(self, other):
if isinstance(other, groupby.GroupBy):
return NotImplemented
align_type = OPTIONS['arithmetic_join'] if join is None else join
if hasattr(other, 'indexes'):
self, other = align(self, other, join=align_type, copy=False)
g = f if not reflexive else lambda x, y: f(y, x)
ds = self._calculate_binary_op(g, other, join=align_type)
return ds
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, groupby.GroupBy):
raise TypeError('in-place operations between a Dataset and '
'a grouped object are not permitted')
# we don't actually modify arrays in-place with in-place Dataset
# arithmetic -- this lets us automatically align things
if hasattr(other, 'indexes'):
other = other.reindex_like(self, copy=False)
g = ops.inplace_to_noninplace_op(f)
ds = self._calculate_binary_op(g, other, inplace=True)
self._replace_vars_and_dims(ds._variables, ds._coord_names,
attrs=ds._attrs, inplace=True)
return self
return func
def _calculate_binary_op(self, f, other, join='inner',
inplace=False):
def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):
if inplace and set(lhs_data_vars) != set(rhs_data_vars):
raise ValueError('datasets must have the same data variables '
'for in-place arithmetic operations: %s, %s'
% (list(lhs_data_vars), list(rhs_data_vars)))
dest_vars = OrderedDict()
for k in lhs_data_vars:
if k in rhs_data_vars:
dest_vars[k] = f(lhs_vars[k], rhs_vars[k])
elif join in ["left", "outer"]:
dest_vars[k] = f(lhs_vars[k], np.nan)
for k in rhs_data_vars:
if k not in dest_vars and join in ["right", "outer"]:
dest_vars[k] = f(rhs_vars[k], np.nan)
return dest_vars
if utils.is_dict_like(other) and not isinstance(other, Dataset):
# can't use our shortcut of doing the binary operation with
# Variable objects, so apply over our data vars instead.
new_data_vars = apply_over_both(self.data_vars, other,
self.data_vars, other)
return Dataset(new_data_vars)
other_coords = getattr(other, 'coords', None)
ds = self.coords.merge(other_coords)
if isinstance(other, Dataset):
new_vars = apply_over_both(self.data_vars, other.data_vars,
self.variables, other.variables)
else:
other_variable = getattr(other, 'variable', other)
new_vars = OrderedDict((k, f(self.variables[k], other_variable))
for k in self.data_vars)
ds._variables.update(new_vars)
ds._dims = calculate_dimensions(ds._variables)
return ds
def _copy_attrs_from(self, other):
self.attrs = other.attrs
for v in other.variables:
if v in self.variables:
self.variables[v].attrs = other.variables[v].attrs
def diff(self, dim, n=1, label='upper'):
"""Calculate the n-th order discrete difference along given axis.
Parameters
----------
dim : str, optional
Dimension over which to calculate the finite difference.
n : int, optional
The number of times values are differenced.
label : str, optional
The new coordinate in dimension ``dim`` will have the
values of either the minuend's or subtrahend's coordinate
for values 'upper' and 'lower', respectively. Other
values are not supported.
Returns
-------
difference : same type as caller
The n-th order finite difference of this object.
Examples
--------
>>> ds = xr.Dataset({'foo': ('x', [5, 5, 6, 6])})
>>> ds.diff('x')
<xarray.Dataset>
Dimensions: (x: 3)
Coordinates:
* x (x) int64 1 2 3
Data variables:
foo (x) int64 0 1 0
>>> ds.diff('x', 2)
<xarray.Dataset>
Dimensions: (x: 2)
Coordinates:
* x (x) int64 2 3
Data variables:
foo (x) int64 1 -1
"""
if n == 0:
return self
if n < 0:
raise ValueError('order `n` must be non-negative but got {0}'
''.format(n))
# prepare slices
kwargs_start = {dim: slice(None, -1)}
kwargs_end = {dim: slice(1, None)}
# prepare new coordinate
if label == 'upper':
kwargs_new = kwargs_end
elif label == 'lower':
kwargs_new = kwargs_start
else:
raise ValueError('The \'label\' argument has to be either '
'\'upper\' or \'lower\'')
variables = OrderedDict()
for name, var in iteritems(self.variables):
if dim in var.dims:
if name in self.data_vars:
variables[name] = (var.isel(**kwargs_end) -
var.isel(**kwargs_start))
else:
variables[name] = var.isel(**kwargs_new)
else:
variables[name] = var
difference = self._replace_vars_and_dims(variables)
if n > 1:
return difference.diff(dim, n - 1)
else:
return difference
def shift(self, **shifts):
"""Shift this dataset by an offset along one or more dimensions.
Only data variables are moved; coordinates stay in place. This is
consistent with the behavior of ``shift`` in pandas.
Parameters
----------
**shifts : keyword arguments of the form {dim: offset}
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
Returns
-------
shifted : Dataset
Dataset with the same coordinates and attributes but shifted data
variables.
See also
--------
roll
Examples
--------
>>> ds = xr.Dataset({'foo': ('x', list('abcde'))})
>>> ds.shift(x=2)
<xarray.Dataset>
Dimensions: (x: 5)
Coordinates:
* x (x) int64 0 1 2 3 4
Data variables:
foo (x) object nan nan 'a' 'b' 'c'
"""
invalid = [k for k in shifts if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
variables = OrderedDict()
for name, var in iteritems(self.variables):
if name in self.data_vars:
var_shifts = dict((k, v) for k, v in shifts.items()
if k in var.dims)
variables[name] = var.shift(**var_shifts)
else:
variables[name] = var
return self._replace_vars_and_dims(variables)
def roll(self, **shifts):
"""Roll this dataset by an offset along one or more dimensions.
Unlike shift, roll rotates all variables, including coordinates. The
direction of rotation is consistent with :py:func:`numpy.roll`.
Parameters
----------
**shifts : keyword arguments of the form {dim: offset}
Integer offset to rotate each of the given dimensions. Positive
offsets roll to the right; negative offsets roll to the left.
Returns
-------
rolled : Dataset
Dataset with the same coordinates and attributes but rolled
variables.
See also
--------
shift
Examples
--------
>>> ds = xr.Dataset({'foo': ('x', list('abcde'))})
>>> ds.roll(x=2)
<xarray.Dataset>
Dimensions: (x: 5)
Coordinates:
* x (x) int64 3 4 0 1 2
Data variables:
foo (x) object 'd' 'e' 'a' 'b' 'c'
"""
invalid = [k for k in shifts if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
variables = OrderedDict()
for name, var in iteritems(self.variables):
var_shifts = dict((k, v) for k, v in shifts.items()
if k in var.dims)
variables[name] = var.roll(**var_shifts)
return self._replace_vars_and_dims(variables)
def sortby(self, variables, ascending=True):
"""
Sort object by labels or values (along an axis).
Sorts the dataset, either along specified dimensions,
or according to values of 1-D dataarrays that share dimension
with calling object.
If the input variables are dataarrays, then the dataarrays are aligned
(via left-join) to the calling object prior to sorting by cell values.
NaNs are sorted to the end, following Numpy convention.
If multiple sorts along the same dimension are
given, numpy's lexsort is performed along that dimension:
https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html
and the FIRST key in the sequence is used as the primary sort key,
followed by the 2nd key, etc.
Parameters
----------
variables: str, DataArray, or list of either
1D DataArray objects or name(s) of 1D variable(s) in
coords/data_vars whose values are used to sort the dataset.
ascending: boolean, optional
Whether to sort by ascending or descending order.
Returns
-------
sorted: Dataset
A new dataset where all the specified dims are sorted by dim
labels.
"""
from .dataarray import DataArray
if not isinstance(variables, list):
variables = [variables]
else:
variables = variables
variables = [v if isinstance(v, DataArray) else self[v]
for v in variables]
aligned_vars = align(self, *variables, join='left')
aligned_self = aligned_vars[0]
aligned_other_vars = aligned_vars[1:]
vars_by_dim = defaultdict(list)
for data_array in aligned_other_vars:
if data_array.ndim != 1:
raise ValueError("Input DataArray is not 1-D.")
if (data_array.dtype == object and
LooseVersion(np.__version__) < LooseVersion('1.11.0')):
raise NotImplementedError(
'sortby uses np.lexsort under the hood, which requires '
'numpy 1.11.0 or later to support object data-type.')
(key,) = data_array.dims
vars_by_dim[key].append(data_array)
indices = {}
for key, arrays in vars_by_dim.items():
order = np.lexsort(tuple(reversed(arrays)))
indices[key] = order if ascending else order[::-1]
return aligned_self.isel(**indices)
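# Minimal usage sketch for ``sortby``; the coordinate and variable names are illustrative:
#     >>> import xarray as xr
#     >>> ds = xr.Dataset({'foo': ('x', [30, 10, 20])}, coords={'x': [3, 1, 2]})
#     >>> ds.sortby('x')                     # sort by the coordinate labels
#     >>> ds.sortby('foo', ascending=False)  # or by the values of a 1-D variable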
def quantile(self, q, dim=None, interpolation='linear',
numeric_only=False, keep_attrs=False):
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantiles(s) of the array elements for each variable
in the Dataset.
Parameters
----------
q : float in range of [0,1] (or sequence of floats)
Quantile to compute, which must be between 0 and 1 inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
numeric_only : bool, optional
If True, only apply ``func`` to variables with a numeric dtype.
Returns
-------
quantiles : Dataset
If `q` is a single quantile, then the result is a scalar for each
variable in data_vars. If multiple quantiles are given, the first
axis of the result corresponds to the quantile and a quantile
dimension is added to the returned Dataset. The other dimensions are
the dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanpercentile, pandas.Series.quantile, DataArray.quantile
"""
if isinstance(dim, basestring):
dims = set([dim])
elif dim is None:
dims = set(self.dims)
else:
dims = set(dim)
_assert_empty([dim for dim in dims if dim not in self.dims],
'Dataset does not contain the dimensions: %s')
q = np.asarray(q, dtype=np.float64)
variables = OrderedDict()
for name, var in iteritems(self.variables):
reduce_dims = [dim for dim in var.dims if dim in dims]
if reduce_dims or not var.dims:
if name not in self.coords:
if (not numeric_only or
np.issubdtype(var.dtype, np.number) or
var.dtype == np.bool_):
if len(reduce_dims) == var.ndim:
# prefer to aggregate over axis=None rather than
# axis=(0, 1) if they will be equivalent, because
# the former is often more efficient
reduce_dims = None
variables[name] = var.quantile(
q, dim=reduce_dims, interpolation=interpolation)
else:
variables[name] = var
# construct the new dataset
coord_names = set(k for k in self.coords if k in variables)
attrs = self.attrs if keep_attrs else None
new = self._replace_vars_and_dims(variables, coord_names, attrs=attrs)
if 'quantile' in new.dims:
new.coords['quantile'] = Variable('quantile', q)
else:
new.coords['quantile'] = q
return new
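# Minimal usage sketch for ``quantile`` on a toy dataset:
#     >>> import xarray as xr
#     >>> ds = xr.Dataset({'foo': ('x', [1.0, 2.0, 3.0, 4.0])})
#     >>> ds.quantile(0.5, dim='x')            # scalar median for each variable
#     >>> ds.quantile([0.25, 0.75], dim='x')   # adds a 'quantile' dimension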
@property
def real(self):
return self._unary_op(lambda x: x.real, keep_attrs=True)(self)
@property
def imag(self):
return self._unary_op(lambda x: x.imag, keep_attrs=True)(self)
def filter_by_attrs(self, **kwargs):
"""Returns a ``Dataset`` with variables that match specific conditions.
Can pass in ``key=value`` or ``key=callable``. Variables are returned
that contain all of the matches or for which the callable returns True.
If using a callable, note that it should accept a single parameter only,
the attribute value.
Parameters
----------
**kwargs : key=value
key : str
Attribute name.
value : callable or obj
If value is a callable, it should return a boolean in the form
of bool = func(attr) where attr is da.attrs[key].
Otherwise, value will be compared to each
DataArray's attrs[key].
Returns
-------
new : Dataset
New dataset with variables filtered by attribute.
Examples
--------
>>> # Create an example dataset:
>>> import numpy as np
>>> import pandas as pd
>>> import xarray as xr
>>> temp = 15 + 8 * np.random.randn(2, 2, 3)
>>> precip = 10 * np.random.rand(2, 2, 3)
>>> lon = [[-99.83, -99.32], [-99.79, -99.23]]
>>> lat = [[42.25, 42.21], [42.63, 42.59]]
>>> dims = ['x', 'y', 'time']
>>> temp_attr = dict(standard_name='air_potential_temperature')
>>> precip_attr = dict(standard_name='convective_precipitation_flux')
>>> ds = xr.Dataset({
... 'temperature': (dims, temp, temp_attr),
... 'precipitation': (dims, precip, precip_attr)},
... coords={
... 'lon': (['x', 'y'], lon),
... 'lat': (['x', 'y'], lat),
... 'time': pd.date_range('2014-09-06', periods=3),
... 'reference_time': pd.Timestamp('2014-09-05')})
>>> # Get variables matching a specific standard_name.
>>> ds.filter_by_attrs(standard_name='convective_precipitation_flux')
<xarray.Dataset>
Dimensions: (time: 3, x: 2, y: 2)
Coordinates:
* x (x) int64 0 1
* time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08
lat (x, y) float64 42.25 42.21 42.63 42.59
* y (y) int64 0 1
reference_time datetime64[ns] 2014-09-05
lon (x, y) float64 -99.83 -99.32 -99.79 -99.23
Data variables:
precipitation (x, y, time) float64 4.178 2.307 6.041 6.046 0.06648 ...
>>> # Get all variables that have a standard_name attribute.
>>> standard_name = lambda v: v is not None
>>> ds.filter_by_attrs(standard_name=standard_name)
<xarray.Dataset>
Dimensions: (time: 3, x: 2, y: 2)
Coordinates:
lon (x, y) float64 -99.83 -99.32 -99.79 -99.23
lat (x, y) float64 42.25 42.21 42.63 42.59
* x (x) int64 0 1
* y (y) int64 0 1
* time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08
reference_time datetime64[ns] 2014-09-05
Data variables:
temperature (x, y, time) float64 25.86 20.82 6.954 23.13 10.25 11.68 ...
precipitation (x, y, time) float64 5.702 0.9422 2.075 1.178 3.284 ...
"""
selection = []
for var_name, variable in self.data_vars.items():
for attr_name, pattern in kwargs.items():
attr_value = variable.attrs.get(attr_name)
if ((callable(pattern) and pattern(attr_value)) or
attr_value == pattern):
selection.append(var_name)
return self[selection]
ops.inject_all_ops_and_reduce_methods(Dataset, array_only=False)
|
apache-2.0
|
hainm/scikit-learn
|
examples/neural_networks/plot_rbm_logistic_classification.py
|
258
|
4609
|
"""
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
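# A minimal sketch of the grid search mentioned in the module docstring; the parameter
# ranges below are illustrative assumptions, not the grid the authors used. It is
# disabled by default because it is slow to run.
run_grid_search = False
if run_grid_search:
    from sklearn.grid_search import GridSearchCV  # pre-0.18 API, matching the imports above
    param_grid = {
        'rbm__learning_rate': [0.01, 0.06, 0.1],
        'rbm__n_components': [50, 100, 200],
        'logistic__C': [1000.0, 6000.0, 10000.0],
    }
    search = GridSearchCV(classifier, param_grid, verbose=1)
    search.fit(X_train, Y_train)
    print("Best parameters found by grid search:", search.best_params_)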
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
VHarisop/Parallel
|
ex1/report/omp_results/plots.py
|
2
|
1127
|
#!/usr/bin/env python
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['KerkisSans']})
try:
msize = sys.argv[1]
except IndexError:
sys.stderr.write("Usage: ./plots.py size\n")
exit(0)
keys = {"sdl": 'Seidel SOR', "rbl": 'Red-Black SOR', "tjb": 'Tiled Jacobi'}
for key in keys:
with open('scl{0}{1}.dat'.format(key, msize), 'r') as f:
vals = [tuple(map(lambda x: float(x), i.split())) for i in f]
ind = np.arange(len(vals))
width = 0.4
# if you don't want bars to overlap, set displ = width
displ = width
fig, ax = plt.subplots()
bf_1 = ax.bar(ind + 1, [i[0] for i in vals], width, color='r', label='Total')
bf_2 = ax.bar(ind + 1 + displ, [i[1] for i in vals], width, color='y', label='Computation')
# set titles and graphics
plt.xlabel('Threads')
plt.ylabel('Time')
plt.title('{0} - {1}x{1}'.format(keys[key], msize))
plt.legend()
plt.savefig('bars{}{}.png'.format(key, msize), ext="png")
|
gpl-2.0
|
jiwang576/incubator-airflow
|
airflow/hooks/presto_hook.py
|
22
|
3617
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
import logging
from pyhive import presto
from pyhive.exc import DatabaseError
from airflow.hooks.dbapi_hook import DbApiHook
logging.getLogger("pyhive").setLevel(logging.INFO)
class PrestoException(Exception):
pass
class PrestoHook(DbApiHook):
"""
Interact with Presto through PyHive!
>>> ph = PrestoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = 'presto_conn_id'
default_conn_name = 'presto_default'
def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(self.presto_conn_id)
return presto.connect(
host=db.host,
port=db.port,
username=db.login,
catalog=db.extra_dejson.get('catalog', 'hive'),
schema=db.schema)
@staticmethod
def _strip_sql(sql):
return sql.strip().rstrip(';')
def _get_pretty_exception_message(self, e):
"""
Parses some DatabaseError to provide a better error message
"""
if (hasattr(e, 'message')
and 'errorName' in e.message
and 'message' in e.message):
return ('{name}: {message}'.format(
name=e.message['errorName'],
message=e.message['message']))
else:
return str(e)
def get_records(self, hql, parameters=None):
"""
Get a set of records from Presto
"""
try:
return super(PrestoHook, self).get_records(
self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(self._get_pretty_exception_message(e))
def get_first(self, hql, parameters=None):
"""
Returns only the first row, regardless of how many rows the query
returns.
"""
try:
return super(PrestoHook, self).get_first(
self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(self._get_pretty_exception_message(e))
def get_pandas_df(self, hql, parameters=None):
"""
Get a pandas dataframe from a sql query.
"""
import pandas
cursor = self.get_cursor()
try:
cursor.execute(self._strip_sql(hql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
raise PrestoException(self._get_pretty_exception_message(e))
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame()
return df
def run(self, hql, parameters=None):
"""
Execute the statement against Presto. Can be used to create views.
"""
return super(PrestoHook, self).run(self._strip_sql(hql), parameters)
def insert_rows(self):
raise NotImplementedError()
|
apache-2.0
|
pandeyadarsh/sympy
|
examples/intermediate/mplot3d.py
|
93
|
1252
|
#!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
warnings.filterwarnings("ignore", "Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
# ax.plot_surface(x, y, z, rstride=2, cstride=2)
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
def main():
x = Symbol('x')
y = Symbol('y')
mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
|
bsd-3-clause
|
RapidLzj/Scheduler
|
plotmap.py
|
1
|
3299
|
# -*- coding: utf-8 -*-
"""
Module plotmap : part of SAGE Digital Sky Survey Observation Scheduler
v 1.0 : By Jie Zheng, 201607, Tucson, AZ, USA
Draw a sky map.
"""
import numpy as np
from Mollweide import moll
from matplotlib import pyplot as plt
import common
import schdutil
def plotmap (ra, de, tag, title="", epsfile=None, pngfile=None,
spos=None, mpos=None, zenith=None,
xcolormark=None, xlabel=None) :
""" Plot a map about fields, different tag have different color and marker.
args:
ra: ndarray of ra
de: ndarray of dec
tag: ndarray of tag
title: title of map
epsfile: eps filename to save, if None, do not save as eps
pngfile: png filename to save, if None, do not save as png
spos: tuple of ra and dec of sun position, if none, do not mark
mpos: tuple of ra and dec of moon position, if none, do not mark
zenith: tuple of ra and dec of zenith, if none, do not mark
xcolormark: dict of color and mark for different tag, if none, use default
xlabel: dict of labels for each tag, if none, use default
"""
# handling default arguments
if xcolormark is None :
xcolormark = {
0x00:"k,", # unobserved and not planed
0x01:"k+", # partly observed and not planned
0x02:"go", # finished
0x03:"rs", # planed in this night
0x04:"m+", # candidate0
0x05:"m+", # candidate1
0x07:"bs", # latest added
0x10:"y,", # Sun or Moon neighbourhood
0x11:"y+", #
0x12:"yo", #
0x1F:"b,", # skiped
}
if xlabel is None :
xlabel = {
0x00:"Unobserved", # 00000
0x01:"Partly obsed", # 00001
0x02:"Finished", # 00010
0x03:"Tonight", # 00011
0x04:"Candidate", # 00100
0x05: None, # 00101
0x07:"New Selection",# 00111
0x10:"Near Sun/Moon",# 10000
0x11:"Near Sun/Moon",# 10001
0x12:"Near Sun/Moon",# 10010
0x1F:"Skiped", # 11111
}
# plot
equf = moll(lat_range=(-5,88), xsize=180, ysize=90, lon_center=30.0)
plt.figure(figsize=(18,10))
equf.grid(lat_lab_lon=240, lon_lab_lat=-5, lat_step=10)
for t in set(tag):
ix = np.where(tag == t)
equf.scatter(ra[ix], de[ix], xcolormark[t], label=xlabel[t])
if spos is not None :
x, y = equf.project(spos[0], spos[1])
plt.plot(x, y, "ro", markersize=10.0,
label="Sun {ra:5.1f} {de:5.1f}".format(ra=spos[0], de=spos[1]))
if mpos is not None :
x, y = equf.project(mpos[0], mpos[1])
plt.plot(x, y, "rD", markersize=10.0,
label="Moon {ra:5.1f} {de:5.1f}".format(ra=mpos[0], de=mpos[1]))
if zenith is not None :
x, y = equf.project(zenith[0], zenith[1])
plt.plot(x, y, "r^", markersize=10.0,
label="Zenith {ra:5.1f} {de:5.1f}".format(ra=zenith[0], de=zenith[1]))
plt.legend()
plt.title("{title} Observation Plan".format(title=title))
if epsfile is not None : plt.savefig(epsfile)
if pngfile is not None : plt.savefig(pngfile)
plt.close()
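# Usage sketch (illustrative, not part of the original module): the coordinates and
# tags below are made-up values that follow the bit-flag scheme of the default
# xcolormark/xlabel dictionaries above.
#
#   import numpy as np
#   ra  = np.array([10.0, 30.0, 50.0, 70.0])
#   de  = np.array([20.0, 25.0, 30.0, 35.0])
#   tag = np.array([0x00, 0x02, 0x03, 0x1F])
#   plotmap(ra, de, tag, title="2016-07-01",
#           spos=(90.0, 23.0), mpos=(120.0, -5.0), zenith=(60.0, 32.0),
#           pngfile="plan_20160701.png")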
|
gpl-3.0
|
MKLab-ITI/news-popularity-prediction
|
news_popularity_prediction/visualization/snow_2016_workshop/slashdot_results.py
|
1
|
28461
|
__author__ = 'Georgios Rizos ([email protected])'
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from news_popularity_prediction.visualization.snow_2016_workshop.common import get_results_file_paths, handle_nan,\
get_method_name_to_legend_name_dict, add_results
matplotlib.rcParams["ps.useafm"] = True
matplotlib.rcParams["pdf.use14corefonts"] = True
matplotlib.rcParams["text.usetex"] = True
def read_slashdot_result_data(output_folder, method_name_list):
slashdot_mse = dict()
slashdot_jaccard = dict()
feature_names = dict()
slashdot_k_list = None
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "slashdot"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
CONFIGURATION_DICT["baseline"] = "mean"
METHOD = "Baseline Mean"
if METHOD in method_name_list:
slashdot_k_list = add_results(mse_results=slashdot_mse,
jaccard_results=slashdot_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "slashdot"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
CONFIGURATION_DICT["baseline"] = "median"
METHOD = "Baseline Median"
if METHOD in method_name_list:
slashdot_k_list = add_results(mse_results=slashdot_mse,
jaccard_results=slashdot_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "slashdot"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
CONFIGURATION_DICT["baseline"] = "comments"
METHOD = "Baseline Comments"
if METHOD in method_name_list:
slashdot_k_list = add_results(mse_results=slashdot_mse,
jaccard_results=slashdot_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "slashdot"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
CONFIGURATION_DICT["baseline"] = "users"
METHOD = "Baseline Users"
if METHOD in method_name_list:
slashdot_k_list = add_results(mse_results=slashdot_mse,
jaccard_results=slashdot_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "slashdot"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
CONFIGURATION_DICT["baseline"] = "comments_users"
METHOD = "Baseline Comments + Users"
if METHOD in method_name_list:
slashdot_k_list = add_results(mse_results=slashdot_mse,
jaccard_results=slashdot_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "slashdot"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
CONFIGURATION_DICT["baseline"] = "simple graph"
METHOD = "Simple Graph"
if METHOD in method_name_list:
slashdot_k_list = add_results(mse_results=slashdot_mse,
jaccard_results=slashdot_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "slashdot"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = True
CONFIGURATION_DICT["add_author_features"] = False
METHOD = "Temporal"
if METHOD in method_name_list:
slashdot_k_list = add_results(mse_results=slashdot_mse,
jaccard_results=slashdot_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "slashdot"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = True
CONFIGURATION_DICT["add_usergraph_features"] = True
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
METHOD = "Comment Tree + User Graph"
if METHOD in method_name_list:
slashdot_k_list = add_results(mse_results=slashdot_mse,
jaccard_results=slashdot_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "slashdot"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = True
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
METHOD = "Comment Tree"
if METHOD in method_name_list:
slashdot_k_list = add_results(mse_results=slashdot_mse,
jaccard_results=slashdot_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "slashdot"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = True
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
METHOD = "User Graph"
if METHOD in method_name_list:
slashdot_k_list = add_results(mse_results=slashdot_mse,
jaccard_results=slashdot_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "slashdot"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = True
CONFIGURATION_DICT["add_usergraph_features"] = True
CONFIGURATION_DICT["add_temporal_features"] = True
CONFIGURATION_DICT["add_author_features"] = False
METHOD = "All"
if METHOD in method_name_list:
slashdot_k_list = add_results(mse_results=slashdot_mse,
jaccard_results=slashdot_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
return slashdot_mse, slashdot_jaccard, slashdot_k_list
########################################################################################################################
def read_barrapunto_result_data(output_folder, method_name_list):
barrapunto_mse = dict()
barrapunto_jaccard = dict()
feature_names = dict()
barrapunto_k_list = None
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "barrapunto"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
CONFIGURATION_DICT["baseline"] = "mean"
METHOD = "Baseline Mean"
if METHOD in method_name_list:
barrapunto_k_list = add_results(mse_results=barrapunto_mse,
jaccard_results=barrapunto_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "barrapunto"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
CONFIGURATION_DICT["baseline"] = "median"
METHOD = "Baseline Median"
if METHOD in method_name_list:
barrapunto_k_list = add_results(mse_results=barrapunto_mse,
jaccard_results=barrapunto_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "barrapunto"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
CONFIGURATION_DICT["baseline"] = "comments"
METHOD = "Baseline Comments"
if METHOD in method_name_list:
barrapunto_k_list = add_results(mse_results=barrapunto_mse,
jaccard_results=barrapunto_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "barrapunto"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
CONFIGURATION_DICT["baseline"] = "users"
METHOD = "Baseline Users"
if METHOD in method_name_list:
barrapunto_k_list = add_results(mse_results=barrapunto_mse,
jaccard_results=barrapunto_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "barrapunto"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
CONFIGURATION_DICT["baseline"] = "comments_users"
METHOD = "Baseline Comments + Users"
if METHOD in method_name_list:
barrapunto_k_list = add_results(mse_results=barrapunto_mse,
jaccard_results=barrapunto_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "barrapunto"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
CONFIGURATION_DICT["baseline"] = "simple graph"
METHOD = "Simple Graph"
if METHOD in method_name_list:
barrapunto_k_list = add_results(mse_results=barrapunto_mse,
jaccard_results=barrapunto_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "barrapunto"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = True
CONFIGURATION_DICT["add_author_features"] = False
METHOD = "Temporal"
if METHOD in method_name_list:
barrapunto_k_list = add_results(mse_results=barrapunto_mse,
jaccard_results=barrapunto_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "barrapunto"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = True
CONFIGURATION_DICT["add_usergraph_features"] = True
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
METHOD = "Comment Tree + User Graph"
if METHOD in method_name_list:
barrapunto_k_list = add_results(mse_results=barrapunto_mse,
jaccard_results=barrapunto_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "barrapunto"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = True
CONFIGURATION_DICT["add_usergraph_features"] = False
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
METHOD = "Comment Tree"
if METHOD in method_name_list:
barrapunto_k_list = add_results(mse_results=barrapunto_mse,
jaccard_results=barrapunto_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "barrapunto"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = False
CONFIGURATION_DICT["add_usergraph_features"] = True
CONFIGURATION_DICT["add_temporal_features"] = False
CONFIGURATION_DICT["add_author_features"] = False
METHOD = "User Graph"
if METHOD in method_name_list:
barrapunto_k_list = add_results(mse_results=barrapunto_mse,
jaccard_results=barrapunto_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
CONFIGURATION_DICT = dict()
CONFIGURATION_DICT["target_name_list"] = ["comments", "users"]
CONFIGURATION_DICT["osn_name_focus"] = "barrapunto"
CONFIGURATION_DICT["target_osn_name"] = "slashdot"
CONFIGURATION_DICT["feature_osn_name_list"] = ["slashdot"]
CONFIGURATION_DICT["add_branching_features"] = True
CONFIGURATION_DICT["add_usergraph_features"] = True
CONFIGURATION_DICT["add_temporal_features"] = True
CONFIGURATION_DICT["add_author_features"] = False
METHOD = "All"
if METHOD in method_name_list:
barrapunto_k_list = add_results(mse_results=barrapunto_mse,
jaccard_results=barrapunto_jaccard,
feature_names=feature_names,
feature_name_offset=9,
method=METHOD,
target_name_list=CONFIGURATION_DICT["target_name_list"],
results_file_paths=get_results_file_paths(output_folder, CONFIGURATION_DICT))
return barrapunto_mse, barrapunto_jaccard, barrapunto_k_list
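# Refactoring sketch (added for illustration; "_METHOD_OVERRIDES" and
# "_read_result_data" are hypothetical names, not part of the original module).
# The two readers above differ only in the "osn_name_focus" value and in a few
# CONFIGURATION_DICT flags per method, so the repetition could be driven by a table:
_METHOD_OVERRIDES = [
    ("Baseline Mean", {"baseline": "mean"}),
    ("Baseline Median", {"baseline": "median"}),
    ("Baseline Comments", {"baseline": "comments"}),
    ("Baseline Users", {"baseline": "users"}),
    ("Baseline Comments + Users", {"baseline": "comments_users"}),
    ("Simple Graph", {"baseline": "simple graph"}),
    ("Temporal", {"add_temporal_features": True}),
    ("Comment Tree + User Graph", {"add_branching_features": True,
                                   "add_usergraph_features": True}),
    ("Comment Tree", {"add_branching_features": True}),
    ("User Graph", {"add_usergraph_features": True}),
    ("All", {"add_branching_features": True,
             "add_usergraph_features": True,
             "add_temporal_features": True}),
]
def _read_result_data(output_folder, method_name_list, osn_name_focus):
    mse = dict()
    jaccard = dict()
    feature_names = dict()
    k_list = None
    for method, overrides in _METHOD_OVERRIDES:
        if method not in method_name_list:
            continue
        configuration = {"target_name_list": ["comments", "users"],
                         "osn_name_focus": osn_name_focus,
                         "target_osn_name": "slashdot",
                         "feature_osn_name_list": ["slashdot"],
                         "add_branching_features": False,
                         "add_usergraph_features": False,
                         "add_temporal_features": False,
                         "add_author_features": False}
        configuration.update(overrides)
        k_list = add_results(mse_results=mse,
                             jaccard_results=jaccard,
                             feature_names=feature_names,
                             feature_name_offset=9,
                             method=method,
                             target_name_list=configuration["target_name_list"],
                             results_file_paths=get_results_file_paths(output_folder,
                                                                       configuration))
    return mse, jaccard, k_list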
def make_slashdot_figures(output_path_prefix, method_name_list, slashdot_mse, slashdot_jaccard, slashdot_k_list):
sns.set_style("darkgrid")
sns.set_context("paper")
translator = get_method_name_to_legend_name_dict()
slashdot_k_list = list(slashdot_k_list)
fig, axes = plt.subplots(1, 2, sharex=True)
axes[0].set_title("SlashDot Comments")
axes[1].set_title("SlashDot Users")
plt.locator_params(nbins=8)
# Comments
for m, method in enumerate(method_name_list):
axes[0].set_ylabel("MSE")
axes[0].set_xlabel("Lifetime (sec)")
axes[0].plot(slashdot_k_list[1:],
handle_nan(slashdot_mse[method]["comments"].mean(axis=1))[1:],
label=translator[method])
# Users
for m, method in enumerate(method_name_list):
# axes[1].set_ylabel("MSE")
axes[1].set_xlabel("Lifetime (sec)")
axes[1].plot(slashdot_k_list[1:],
handle_nan(slashdot_mse[method]["users"].mean(axis=1))[1:],
label=translator[method])
axes[1].legend(loc="upper right")
# plt.show()
plt.savefig(output_path_prefix + "_mse_slashdot_SNOW" + ".png", format="png")
plt.savefig(output_path_prefix + "_mse_slashdot_SNOW" + ".eps", format="eps")
def make_barrapunto_figures(output_path_prefix, method_name_list, barrapunto_mse, barrapunto_jaccard, barrapunto_k_list):
sns.set_style("darkgrid")
sns.set_context("paper")
translator = get_method_name_to_legend_name_dict()
barrapunto_k_list = list(barrapunto_k_list)
fig, axes = plt.subplots(1, 2, sharex=True)
axes[0].set_title("BarraPunto Comments")
axes[1].set_title("BarraPunto Users")
plt.locator_params(nbins=8)
# Comments
for m, method in enumerate(method_name_list):
axes[0].set_ylabel("MSE")
axes[0].set_xlabel("Lifetime (sec)")
axes[0].plot(barrapunto_k_list[1:],
handle_nan(barrapunto_mse[method]["comments"].mean(axis=1))[1:],
label=translator[method])
# Users
for m, method in enumerate(method_name_list):
# axes[1].set_ylabel("MSE")
axes[1].set_xlabel("Lifetime (sec)")
axes[1].plot(barrapunto_k_list[1:],
handle_nan(barrapunto_mse[method]["users"].mean(axis=1))[1:],
label=translator[method])
axes[1].legend(loc="upper right")
# plt.show()
plt.savefig(output_path_prefix + "_mse_barrapunto_SNOW" + ".png", format="png")
plt.savefig(output_path_prefix + "_mse_barrapunto_SNOW" + ".eps", format="eps")
|
apache-2.0
|
davidgardenier/frbpoppy
|
tests/rates/alpha_analytical.py
|
1
|
1884
|
"""Use simple rate comparisions, try predicting event rates."""
import numpy as np
import matplotlib.pyplot as plt
from frbpoppy import Survey
from tests.convenience import plot_aa_style, rel_path
ALPHAS = np.around(np.linspace(-0.2, -2.5, 7), decimals=2)
SURVEYS = ('parkes-htru', 'arecibo-palfa', 'askap-fly', 'fast-crafts')
def compare_surveys(surv1, surv2, alpha):
"""Event rate surv1 / Event rate surv2 for an alpha."""
omega = surv1.beam_size_at_fwhm/surv2.beam_size_at_fwhm
T_rec = surv1.T_rec/surv2.T_rec
gain = surv1.gain/surv2.gain
beta = surv1.beta/surv2.beta
SEFD = T_rec*beta/gain
bw = surv1.bw/surv2.bw
S_min = surv1.snr_limit/surv2.snr_limit
return omega * (SEFD * S_min)**alpha * (bw)**(-alpha/2)
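# Reasoning sketch for the expression above (added for clarity; this is the usual
# radiometer-equation argument, not text from the original source).  For a source
# population with cumulative flux distribution N(>S) ~ S**alpha, each survey detects
# events at a rate R_i ~ Omega_i * S_min_i**alpha, where the flux limit scales as
#     S_min_i ~ snr_limit_i * SEFD_i / sqrt(bw_i),   SEFD_i = T_rec_i * beta_i / gain_i.
# Taking the ratio of two surveys therefore gives
#     R_1 / R_2 = (Omega_1/Omega_2) * (SEFD_ratio * snr_ratio)**alpha * bw_ratio**(-alpha/2),
# which is exactly the quantity returned by compare_surveys().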
def analytical_rates(surveys=SURVEYS, alphas=ALPHAS):
"""Use a analytical model to scale detection rates to various alphas."""
rates = {}
for surv in surveys:
# Get survey parameters
surv1 = Survey(surv)
surv1.set_beam('perfect', n_sidelobes=0.5)
surv2 = Survey(surveys[0])
surv2.set_beam('perfect', n_sidelobes=0.5)
# Calculate rate per alpha
rate = []
for alpha in alphas:
rate.append(compare_surveys(surv1, surv2, alpha))
rates[surv] = rate
return rates
def main():
"""Plot analytical rates."""
rates = analytical_rates()
for surv in rates:
rate = rates[surv]
plot_aa_style()
plt.plot(ALPHAS, rate, label=surv)
plt.xlabel(r'$\alpha_{\text{in}}$')
plt.ylabel(rf'Events / {SURVEYS[0]}')
plt.xlim((min(ALPHAS), max(ALPHAS)))
plt.yscale('log')
plt.legend()
plt.gca().invert_xaxis()
plt.grid(True)
plt.tight_layout()
plt.savefig(rel_path('./plots/rates_analytical.pdf'))
if __name__ == '__main__':
main()
|
mit
|
jaidevd/truepositive
|
main.py
|
1
|
4946
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the MIT license.
"""
Write drunk, edit sober
"""
import sys
import pandas as pd
import os.path as op
from PyQt4 import QtCore, QtGui
from import_wizard import QImportWizard
from data_frame_model import DataFrameModel
from q_canvas import QCanvas
from enhanced_table_view import QEnhancedTableView
import numpy as np
import matplotlib.pyplot as plt
class MyFileDialog(QtGui.QFileDialog):
accepted = QtCore.Signal(QtCore.QObject)
def __init__(self, parent=None):
super(MyFileDialog, self).__init__()
self.parent = parent
def accept(self):
super(MyFileDialog, self).accept()
setattr(self.parent, "filepath", self.selectedFiles()[0])
self.accepted.emit(self.parent)
class MainWindow(QtGui.QMainWindow):
def __init__(self, filepath=None):
super(MainWindow, self).__init__()
self.getXYPlotArea()
self.getHistPlotArea()
self.getBarPlotArea()
self.filepath = filepath
self.tableView = QEnhancedTableView(self, self.ax, self.histAx,
self.barAx)
self.tabbedArea = QtGui.QTabWidget()
self.tabbedArea.addTab(self.xyCanvas, "XY Plots")
histWidget = QtGui.QWidget(self.tabbedArea)
histLayout = QtGui.QHBoxLayout()
self.binSlider = QtGui.QSlider(QtCore.Qt.Vertical, parent=histWidget)
self.binSlider.setRange(10, 100)
self.binSlider.setSingleStep(10)
self.binSlider.setTickPosition(QtGui.QSlider.TicksRight)
self.binSlider.setTracking(False)
self.binSlider.valueChanged.connect(self.tableView.redrawHistogram)
histLayout.addWidget(self.histCanvas)
histLayout.addWidget(self.binSlider)
histWidget.setLayout(histLayout)
self.tabbedArea.addTab(histWidget, "Histogram")
self.tabbedArea.addTab(self.barCanvas, "Bar Charts")
centralSplitter = QtGui.QSplitter(self)
centralSplitter.addWidget(self.tableView)
centralSplitter.addWidget(self.tabbedArea)
self.setCentralWidget(centralSplitter)
self.parserKwargs = {}
self.readCsv()
# Menu Bar
self.openFileDialog = MyFileDialog(parent=self)
self.openFileDialog.accepted.connect(self.showImportWizard)
menuBar = QtGui.QMenuBar()
fileMenu = QtGui.QMenu("&File", parent=menuBar)
openAct = fileMenu.addAction("Open")
openAct.triggered.connect(self.openFileDialog.exec_)
menuBar.addMenu(fileMenu)
self.setMenuBar(menuBar)
def readCsv(self):
df = pd.read_csv(self.filepath, **self.parserKwargs)
self.dataFrameModel = DataFrameModel(df)
self.tableView.setModel(self.dataFrameModel)
def showImportWizard(self):
self.importWiz = QImportWizard(self)
result = self.importWiz.exec_()
if result == QtGui.QDialog.Accepted:
self.makeParserKwargs()
self.readCsv()
def makeParserKwargs(self):
self.parserKwargs = {"sep": self.importWiz.SEP,
"index_col": self.importWiz.INDEX_COL,
"engine": self.importWiz.PARSER_ENGINE,
"usecols": self.importWiz.USECOLS,
"nrows": self.importWiz.NROWS,
"parse_dates": self.importWiz.DATETIME_COLS}
def getXYPlotArea(self):
self.getExampleFigure()
self.xyCanvas = QCanvas(self.fig, self)
def getHistPlotArea(self):
self.getExampleHist()
self.histCanvas = QCanvas(self.histFig, self)
def getBarPlotArea(self):
self.getExampleBar()
self.barCanvas = QCanvas(self.barFig, self)
def getExampleFigure(self):
x = np.linspace(-2*np.pi, 2*np.pi, 1000)
y = np.sin(x)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hold(False)
ax.plot(x, y)
self.ax = ax
self.fig = fig
def getExampleHist(self):
x = np.random.random(1000)
histFig = plt.figure()
histAx = histFig.add_subplot(111)
histAx.hold(False)
histAx.hist(x, 100)
self.x = x
self.histAx = histAx
self.histFig = histFig
def getExampleBar(self):
x = ['foo', 'bar', 'baz']
y = [5, 10, 15]
barFig = plt.figure()
barAx = barFig.add_subplot(111)
barAx.hold(False)
barAx.bar(range(len(x)), y)
barAx.set_xticks(np.arange(3) + 0.8)
barAx.set_xticklabels(x)
self.barAx = barAx
self.barFig = barFig
if __name__ == '__main__':
filepath = op.join(op.dirname(__file__), "iris.csv")
app = QtGui.QApplication(sys.argv)
window = MainWindow(filepath)
window.show()
sys.exit(app.exec_())
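# Flow note (added for clarity, not part of the original script): MainWindow reads
# the bundled iris.csv on start-up; File -> Open runs MyFileDialog, whose accepted
# signal calls showImportWizard(), and the options gathered by QImportWizard are
# turned into pandas.read_csv keyword arguments by makeParserKwargs() before the
# table model is rebuilt in readCsv().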
|
mit
|
LogicalKnight/scipy-cluster
|
hcluster/hierarchy.py
|
6
|
98963
|
"""
Function Reference
------------------
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
+------------------+-------------------------------------------------+
|*Function* | *Description* |
+------------------+-------------------------------------------------+
|fcluster |forms flat clusters from hierarchical clusters. |
+------------------+-------------------------------------------------+
|fclusterdata |forms flat clusters directly from data. |
+------------------+-------------------------------------------------+
|leaders |singleton root nodes for flat cluster. |
+------------------+-------------------------------------------------+
These are routines for agglomerative clustering.
+------------------+-------------------------------------------------+
|*Function* | *Description* |
+------------------+-------------------------------------------------+
|linkage |agglomeratively clusters original observations. |
+------------------+-------------------------------------------------+
|single |the single/min/nearest algorithm. (alias) |
+------------------+-------------------------------------------------+
|complete |the complete/max/farthest algorithm. (alias) |
+------------------+-------------------------------------------------+
|average |the average/UPGMA algorithm. (alias) |
+------------------+-------------------------------------------------+
|weighted |the weighted/WPGMA algorithm. (alias) |
+------------------+-------------------------------------------------+
|centroid |the centroid/UPGMC algorithm. (alias) |
+------------------+-------------------------------------------------+
|median |the median/WPGMC algorithm. (alias) |
+------------------+-------------------------------------------------+
|ward |the Ward/incremental algorithm. (alias) |
+------------------+-------------------------------------------------+
These routines compute statistics on hierarchies.
+------------------+-------------------------------------------------+
|*Function* | *Description* |
+------------------+-------------------------------------------------+
|cophenet |computes the cophenetic distance between leaves. |
+------------------+-------------------------------------------------+
|from_mlab_linkage |converts a linkage produced by MATLAB(TM). |
+------------------+-------------------------------------------------+
|inconsistent |the inconsistency coefficients for cluster. |
+------------------+-------------------------------------------------+
|maxinconsts |the maximum inconsistency coefficient for each |
| |cluster. |
+------------------+-------------------------------------------------+
|maxdists |the maximum distance for each cluster. |
+------------------+-------------------------------------------------+
|maxRstat |the maximum specific statistic for each cluster. |
+------------------+-------------------------------------------------+
|to_mlab_linkage |converts a linkage to one MATLAB(TM) can |
| |understand. |
+------------------+-------------------------------------------------+
Routines for visualizing flat clusters.
+------------------+-------------------------------------------------+
|*Function* | *Description* |
+------------------+-------------------------------------------------+
|dendrogram |visualizes linkages (requires matplotlib). |
+------------------+-------------------------------------------------+
These are data structures and routines for representing hierarchies as
tree objects.
+------------------+-------------------------------------------------+
|*Function* | *Description* |
+------------------+-------------------------------------------------+
|ClusterNode |represents cluster nodes in a cluster hierarchy. |
+------------------+-------------------------------------------------+
|leaves_list |a left-to-right traversal of the leaves. |
+------------------+-------------------------------------------------+
|to_tree |represents a linkage matrix as a tree object. |
+------------------+-------------------------------------------------+
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
+------------------+-------------------------------------------------+
|*Function* | *Description* |
+------------------+-------------------------------------------------+
|is_valid_im |checks for a valid inconsistency matrix. |
+------------------+-------------------------------------------------+
|is_valid_linkage |checks for a valid hierarchical clustering. |
+------------------+-------------------------------------------------+
|is_isomorphic |checks if two flat clusterings are isomorphic. |
+------------------+-------------------------------------------------+
|is_monotonic |checks if a linkage is monotonic. |
+------------------+-------------------------------------------------+
|correspond |checks whether a condensed distance matrix |
| |corresponds with a linkage |
+------------------+-------------------------------------------------+
|num_obs_linkage |the number of observations corresponding to a |
| |linkage matrix. |
+------------------+-------------------------------------------------+
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
References
----------
.. [Sta07] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [Mti07] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/HierarchicalClustering.html.
Accessed October 1, 2007.
.. [Gow69] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [War63] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [Joh66] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [Sne62] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [Bat95] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [Sok58] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [Ede79] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [Jai88] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [Fis36] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
Copyright Notice
----------------
Copyright (C) Damian Eads, 2007-2008. New BSD License.
"""
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import _hierarchy_wrap, types
import hcluster.distance as distance
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
try:
import warnings
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
except:
def _warning(s):
print ('[WARNING] scipy.cluster: %s' % s)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
        return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
D = np.random.rand(pnts * (pnts - 1) / 2)
else:
raise ValueError("The number of points in the distance matrix must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
:Parameters:
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
:Returns:
Z : ndarray
The linkage matrix.
:SeeAlso:
- linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete complete/max/farthest point linkage on the
condensed distance matrix ``y``. See ``linkage`` for more
information on the return structure and algorithm.
:Parameters:
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
:Returns:
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
:Parameters:
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
:Returns:
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
:SeeAlso:
- linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
:Parameters:
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
:Returns:
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
:SeeAlso:
- linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage. See ``linkage`` for more
information on the return structure and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
:Parameters:
Q : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
:Returns:
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
:SeeAlso:
- linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage. See ``linkage`` for more
information on the return structure and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
:Parameters:
Q : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
:Returns:
- Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
:SeeAlso:
- linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance
matrix. See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
:Parameters:
Q : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
:Returns:
- Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
:SeeAlso:
- linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
r"""
Performs hierarchical/agglomerative clustering on the
condensed distance matrix y. y must be a :math:`{n \choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB(TM) linkage function.
A 4 by :math:`(n-1)` matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \max(dist(u[i],v[j]))
    for all points :math:`i` in cluster :math:`u` and :math:`j` in
    cluster :math:`v`. This is also known as the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \sum_{ij} \frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
    algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
    * method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
give the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \sqrt{\frac{|v|+|s|}
{T}d(v,s)^2
+ \frac{|v|+|t|}
{T}d(v,t)^2
+ \frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there may
be two or more pairs with the same minimum distance. This
    implementation may choose a different minimum than the MATLAB(TM)
version.
:Parameters:
- Q : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as
a :math:`m` by :math:`n` array.
- method : string
The linkage algorithm to use. See the ``Linkage Methods``
section below for full descriptions.
- metric : string
The distance metric to use. See the ``distance.pdist``
function for a list of valid distance metrics.
:Returns:
- Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
"""
if not isinstance(method, str):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods.keys():
raise ValueError("Valid methods when the raw observations are omitted are 'single', 'complete', 'weighted', and 'average'.")
# Since the C code does not support striding using strides.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
_hierarchy_wrap.linkage_wrap(y, Z, int(d), \
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
m = s[1]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods.keys():
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy_wrap.linkage_wrap(dm, Z, n, \
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods.keys():
if metric != 'euclidean':
                raise ValueError('Method %s requires the distance metric to be euclidean' % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy_wrap.linkage_euclid_wrap(dm, Z, X, m, n,
int(_cpy_euclid_methods[method]))
    else:
        raise ValueError("Invalid input: 'y' must be a condensed distance "
                         "matrix (1-D) or an observation matrix (2-D).")
    return Z
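# Usage sketch (illustrative, not part of the original module):
#
#   import numpy as np
#   import hcluster.distance as distance
#
#   X = np.array([[0.0, 0.0], [0.0, 1.0], [5.0, 0.0], [5.0, 1.0]])
#   y = distance.pdist(X)               # condensed distance matrix, length n*(n-1)/2
#   Z = linkage(y, method='single')     # shape (n-1, 4): [idx_a, idx_b, dist, count]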
class ClusterNode:
"""
A tree node class for representing a cluster. Leaf nodes correspond
to original observations, while non-leaf nodes correspond to
non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
:SeeAlso:
- to_tree: for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted. This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
r"""
The identifier of the target node. For :math:`0 \leq i < n`,
:math:`i` corresponds to original observation
        :math:`i`. For :math:`n \leq i < 2n-1`,
:math:`i` corresponds to non-singleton cluster formed at
iteration :math:`i-n`.
:Returns:
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
:Returns:
c : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Returns a reference to the left child tree object. If the node
is a leaf, None is returned.
:Returns:
left : ClusterNode
The left child of the target node.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object. If the node
is a leaf, None is returned.
:Returns:
right : ClusterNode
              The right child of the target node.
"""
return self.right
def is_leaf(self):
"""
Returns True iff the target node is a leaf.
:Returns:
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs preorder traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement:
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
:Parameters:
- func : function
Applied to each leaf ClusterNode object in the pre-order
traversal. Given the i'th leaf node in the pre-order
traversal ``n[i]``, the result of func(n[i]) is stored in
L[i]. If not provided, the index of the original observation
to which the node corresponds is used.
:Returns:
- L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = np.zeros((2 * n,), dtype=bool)
rvisited = np.zeros((2 * n,), dtype=bool)
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if not lvisited[ndid]:
curNode[k + 1] = nd.left
lvisited[ndid] = True
k = k + 1
elif not rvisited[ndid]:
curNode[k + 1] = nd.right
rvisited[ndid] = True
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object. The reference r to the
root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
:Parameters:
- Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
        - rd : bool
When ``False``, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
:Returns:
        - r : ClusterNode
            The root node of the hierarchical clustering tree (or the
            tuple ``(r, d)`` described above when ``rd`` is True).
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The number of original objects is equal to the number of rows minus
# 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n*2-1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError('Corrupt matrix Z. Index to derivative cluster is used before it is formed. See row %d, column 0' % fi)
if fj > i + n:
raise ValueError('Corrupt matrix Z. Index to derivative cluster is used before it is formed. See row %d, column 1' % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i,3] != nd.count:
raise ValueError('Corrupt matrix Z. The count Z[%d,3] is incorrect.' % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
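# Usage sketch (illustrative, not part of the original module):
#
#   root = to_tree(Z)                  # root ClusterNode of the hierarchy
#   leaf_ids = root.pre_order()        # observation ids from left to right
#   root, nodes = to_tree(Z, rd=True)  # also get per-id ClusterNode references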
def _convert_to_bool(X):
if X.dtype != np.bool:
X = np.bool_(X)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = np.double(X)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
:Parameters:
- Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
- Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix ``Z``
of a set of :math:`n` observations in :math:`m`
dimensions. ``Y`` is the condensed distance matrix from which
``Z`` was generated.
:Returns: (c, {d})
- c : ndarray
           The cophenetic correlation coefficient (if ``Y`` is passed).
- d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n*(n-1)/2,), dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy_wrap.cophenetic_distances_wrap(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
Ys = Y.shape
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
#print Yy.shape, Zz.shape
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
#print c, numerator.sum()
return (c, zz)
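# Example sketch (illustrative only; measures how faithfully a linkage
# preserves the original pairwise distances):
#
#   Y = distance.pdist(np.random.rand(8, 3))
#   Z = linkage(Y, method='average')
#   c, coph = cophenet(Z, Y)   # correlation coefficient and condensed matrix
#   d = cophenet(Z)            # condensed cophenetic distances only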
def inconsistent(Z, d=2):
r"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
:Parameters:
- d : int
The number of links up to ``d`` levels below each
non-singleton cluster
- Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
:Returns:
- R : ndarray
A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
.. math::
\frac{\mathtt{Z[i,2]}-\mathtt{R[i,0]}}
{R[i,1]}.
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative integer value.')
# if d == 0:
# d = 1
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy_wrap.inconsistent_wrap(Z, R, int(n), int(d));
return R
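# Example sketch (illustrative only), given a linkage matrix Z computed with
# the linkage function above:
#
#   R = inconsistent(Z, d=2)
#   # R[i, 0]: mean link height, R[i, 1]: std. dev., R[i, 2]: link count,
#   # R[i, 3]: inconsistency coefficient of non-singleton cluster i.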
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module. The conversion does
two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
* a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
:Arguments:
- Z : ndarray
A linkage matrix generated by MATLAB(TM)
:Returns:
- ZS : ndarray
A linkage matrix compatible with this library.
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N');
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy_wrap.calculate_cluster_sizes_wrap(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
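# Example sketch (illustrative only): converting a MATLAB-style linkage over
# four observations (1-based cluster ids, no count column):
#
#   ZM = np.array([[1., 2., 0.3],
#                  [3., 4., 0.7],
#                  [5., 6., 1.1]])
#   Z = from_mlab_linkage(ZM)   # 0-based ids plus a fourth count column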
def to_mlab_linkage(Z):
"""
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
:Arguments:
- Z : ndarray
A linkage matrix generated by this library.
:Returns:
- ZM : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:,0:2] += 1.0
return ZP
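# Example sketch (illustrative only): the inverse conversion drops the count
# column and shifts the cluster ids back to 1..N form.
#
#   ZM = to_mlab_linkage(Z)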
def is_monotonic(Z):
"""
Returns ``True`` if the linkage passed is monotonic. The linkage
is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
:Arguments:
- Z : ndarray
The linkage matrix to check for monotonicity.
:Returns:
- b : bool
A boolean indicating whether the linkage is monotonic.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# Each successive merge distance must be no less than the one before it.
return (Z[1:,2]>=Z[:-1,2]).all()
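# Illustrative note (not from the original source): given a condensed distance
# matrix Y, single, complete, average and weighted linkage always yield
# monotonic linkages; centroid and median linkage may produce inversions.
#
#   print is_monotonic(linkage(Y, 'average'))    # True
#   print is_monotonic(linkage(Y, 'centroid'))   # may be False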
def is_valid_im(R, warning=False, throw=False, name=None):
"""
Returns True if the inconsistency matrix passed is valid. It must
be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
:Arguments:
- R : ndarray
The inconsistency matrix to check for validity.
- warning : bool
When ``True``, issues a Python warning if the linkage
matrix passed is invalid.
- throw : bool
When ``True``, throws a Python exception if the linkage
matrix passed is invalid.
- name : string
This string refers to the variable name of the invalid
linkage matrix.
:Returns:
- b : bool
True iff the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
try:
if type(R) != np.ndarray:
if name:
raise TypeError('Variable \'%s\' passed as inconsistency matrix is not a numpy array.' % name)
else:
raise TypeError('Variable passed as inconsistency matrix is not a numpy array.')
if R.dtype != np.double:
if name:
raise TypeError('Inconsistency matrix \'%s\' must contain doubles (double).' % name)
else:
raise TypeError('Inconsistency matrix must contain doubles (double).')
if len(R.shape) != 2:
if name:
raise ValueError('Inconsistency matrix \'%s\' must have shape=2 (i.e. be two-dimensional).' % name)
else:
raise ValueError('Inconsistency matrix must have shape=2 (i.e. be two-dimensional).')
if R.shape[1] != 4:
if name:
raise ValueError('Inconsistency matrix \'%s\' must have 4 columns.' % name)
else:
raise ValueError('Inconsistency matrix must have 4 columns.')
if R.shape[0] < 1:
if name:
raise ValueError('Inconsistency matrix \'%s\' must have at least one row.' % name)
else:
raise ValueError('Inconsistency matrix must have at least one row.')
if (R[:, 0] < 0).any():
if name:
raise ValueError('Inconsistency matrix \'%s\' contains negative link height means.' % name)
else:
raise ValueError('Inconsistency matrix contains negative link height means.')
if (R[:, 1] < 0).any():
if name:
raise ValueError('Inconsistency matrix \'%s\' contains negative link height standard deviations.' % name)
else:
raise ValueError('Inconsistency matrix contains negative link height standard deviations.')
if (R[:, 2] < 0).any():
if name:
raise ValueError('Inconsistency matrix \'%s\' contains negative link counts.' % name)
else:
raise ValueError('Inconsistency matrix contains negative link counts.')
except Exception, e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
r"""
Checks the validity of a linkage matrix. A linkage matrix is valid
if it is a two dimensional nd-array (type double) with :math:`n`
rows and 4 columns. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \leq \mathtt{Z[i,0]} \leq i+n-1`
and :math:`0 \leq Z[i,1] \leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
:Arguments:
- Z : ndarray
The linkage matrix to check for validity.
- warning : bool
When ``True``, issues a Python warning if the linkage
matrix passed is invalid.
- throw : bool
When ``True``, throws a Python exception if the linkage
matrix passed is invalid.
- name : string
This string refers to the variable name of the invalid
linkage matrix.
:Returns:
- b : bool
True iff the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
try:
if type(Z) != np.ndarray:
if name:
raise TypeError('\'%s\' passed as a linkage is not a valid array.' % name)
else:
raise TypeError('Variable is not a valid array.')
if Z.dtype != np.double:
if name:
raise TypeError('Linkage matrix \'%s\' must contain doubles.' % name)
else:
raise TypeError('Linkage matrix must contain doubles.')
if len(Z.shape) != 2:
if name:
raise ValueError('Linkage matrix \'%s\' must have shape=2 (i.e. be two-dimensional).' % name)
else:
raise ValueError('Linkage matrix must have shape=2 (i.e. be two-dimensional).')
if Z.shape[1] != 4:
if name:
raise ValueError('Linkage matrix \'%s\' must have 4 columns.' % name)
else:
raise ValueError('Linkage matrix must have 4 columns.')
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:,0] < 0).any() or
(Z[:,1] < 0).any()):
if name:
raise ValueError('Linkage \'%s\' contains negative indices.' % name)
else:
raise ValueError('Linkage contains negative indices.')
if (Z[:, 2] < 0).any():
if name:
raise ValueError('Linkage \'%s\' contains negative distances.' % name)
else:
raise ValueError('Linkage contains negative distances.')
if (Z[:, 3] < 0).any():
if name:
raise ValueError('Linkage \'%s\' contains negative counts.' % name)
else:
raise ValueError('Linkage contains negative counts.')
if _check_hierarchy_uses_cluster_before_formed(Z):
if name:
raise ValueError('Linkage \'%s\' uses non-singleton cluster before it is formed.' % name)
else:
raise ValueError('Linkage uses non-singleton cluster before it is formed.')
if _check_hierarchy_uses_cluster_more_than_once(Z):
if name:
raise ValueError('Linkage \'%s\' uses the same cluster more than once.' % name)
else:
raise ValueError('Linkage uses the same cluster more than once.')
# if _check_hierarchy_not_all_clusters_used(Z):
# if name:
# raise ValueError('Linkage \'%s\' does not use all clusters.' % name)
# else:
# raise ValueError('Linkage does not use all clusters.')
except Exception, e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
:Arguments:
- Z : ndarray
The linkage matrix on which to perform the operation.
:Returns:
- n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks if a linkage matrix ``Z`` and condensed distance matrix
``Y`` could possibly correspond to one another.
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
:Arguments:
- Z : ndarray
The linkage matrix to check for correspondance.
- Y : ndarray
The condensed distance matrix to check for correspondance.
:Returns:
- b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``. The threshold ``t`` is a required parameter.
:Arguments:
- Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the ``linkage`` function.
- t : double
The threshold to apply when forming flat clusters.
- criterion : string (optional)
The criterion to use in forming flat clusters. This can
be any of the following values:
* 'inconsistent': If a cluster node and all its
descendents have an inconsistent value less than or equal
to ``t`` then all its leaf descendents belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
* 'distance': Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than ``t``.
* 'maxclust': Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than ``t`` flat clusters are formed.
* 'monocrit': Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do::
MR = maxRstat(Z, R, 3)
cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
* 'maxclust_monocrit': Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do::
MI = maxinconsts(Z, R)
cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
- depth : int (optional)
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. (default=2)
- R : ndarray (optional)
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
- monocrit : ndarray (optional)
A ``(n-1)`` numpy vector of doubles. ``monocrit[i]`` is the
statistic upon which non-singleton ``i`` is thresholded. The
monocrit vector must be monotonic, i.e. given a node ``c`` with
index ``i``, for all node indices j corresponding to nodes
below ``c``, ``monocrit[i] >= monocrit[j]``.
:Returns:
- T : ndarray
A vector of length ``n``. ``T[i]`` is the flat cluster number to
which original observation ``i`` belongs.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy_wrap.cluster_in_wrap(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy_wrap.cluster_dist_wrap(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy_wrap.cluster_maxclust_dist_wrap(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy_wrap.cluster_monocrit_wrap(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy_wrap.cluster_maxclust_monocrit_wrap(Z, monocrit, T,
int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s' % str(criterion))
return T
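# Example sketch (illustrative only), given a linkage matrix Z computed with
# the linkage function above:
#
#   T1 = fcluster(Z, t=0.9)                        # inconsistency threshold
#   T2 = fcluster(Z, t=1.5, criterion='distance')  # cophenetic distance cut
#   T3 = fcluster(Z, t=4, criterion='maxclust')    # at most 4 flat clusters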
def fclusterdata(X, t, criterion='inconsistent', \
metric='euclidean', depth=2, method='single', R=None):
"""
``T = fclusterdata(X, t)``
Clusters the original observations in the ``n`` by ``m`` data
matrix ``X`` (``n`` observations in ``m`` dimensions), using the
euclidean distance metric to calculate distances between original
observations, performs hierarchical clustering using the single
linkage algorithm, and forms flat clusters using the inconsistency
method with t as the cut-off threshold.
A one-dimensional numpy array ``T`` of length ``n`` is
returned. ``T[i]`` is the index of the flat cluster to which the
original observation ``i`` belongs.
:Arguments:
- X : ndarray
The ``n`` by ``m`` data matrix of ``n`` observations in ``m``
dimensions, from which the linkage is computed.
- t : double
The cut-off threshold to apply when forming flat clusters, or the
maximum number of clusters when ``criterion='maxclust'``.
- criterion : string
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent', 'distance', or 'maxclust' cluster
formation algorithms. See ``fcluster`` for descriptions.
- method : string
The linkage method to use (single, complete, average,
weighted, median, centroid, ward). See ``linkage`` for more
information.
- metric : string
The distance metric for calculating pairwise distances. See
distance.pdist for descriptions and linkage to verify
compatibility with the linkage method.
- depth : int
The maximum depth for the inconsistency calculation. See
``inconsistent`` for more information.
- R : ndarray
The inconsistency matrix. It will be computed if necessary
if it is not passed.
:Returns:
- T : ndarray
A vector of length ``n``. ``T[i]`` is the flat cluster number to
which original observation ``i`` belongs.
Notes
-----
This function is similar to the MATLAB(TM) clusterdata function.
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
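# Example sketch (illustrative only): clustering raw observations in a single
# call.
#
#   X = np.random.rand(50, 4)
#   T = fclusterdata(X, t=3, criterion='maxclust', metric='euclidean',
#                    method='average')
#   # T[i] is the flat cluster id assigned to observation i.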
def leaves_list(Z):
"""
Returns a list of leaf node ids (corresponding to observation
vector index) as they appear in the tree from left to right. Z is
a linkage matrix.
:Arguments:
- Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
:Returns:
- L : ndarray
The list of leaf node ids.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy_wrap.prelist_wrap(Z, ML, int(n))
return ML
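# Example sketch (illustrative only): the left-to-right leaf order matches the
# leaf order of a dendrogram drawn from the same linkage, so it can be used,
# for example, to reorder the rows of a heat map.
#
#   order = leaves_list(Z)
#   X_reordered = X[order]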
# Let's do a conditional import. If matplotlib is not available, the
# dendrogram plotting helper below is replaced by a stub that raises ImportError.
try:
import matplotlib
try:
import matplotlib.pylab
import matplotlib.patches
except RuntimeError, e:
# importing matplotlib.pylab can fail with a RuntimeError if installed
# but the graphic engine cannot be initialized (for example without X)
raise ImportError("Could not import matplotib (error was %s)" % str(e))
#import matplotlib.collections
_mpl = True
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= 85, size="6"
# 85 < p, size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements. The
set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation, no_labels, color_list, leaf_font_size=None, leaf_rotation=None, contraction_marks=None):
axis = matplotlib.pylab.gca()
# Independent variable plot width
ivw = len(ivl) * 10
# Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl)*10+5, 10)
if orientation == 'top':
axis.set_ylim([0, dvw])
axis.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
axis.set_xticks([])
axis.set_xticklabels([])
else:
axis.set_xticks(ivticks)
axis.set_xticklabels(ivl)
axis.xaxis.set_ticks_position('bottom')
lbls=axis.get_xticklabels()
if leaf_rotation:
matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation)
else:
matplotlib.pylab.setp(lbls, 'rotation', float(_get_tick_rotation(len(ivl))))
if leaf_font_size:
matplotlib.pylab.setp(lbls, 'size', leaf_font_size)
else:
matplotlib.pylab.setp(lbls, 'size', float(_get_tick_text_size(len(ivl))))
# txt.set_fontsize()
# txt.set_rotation(45)
# Make the tick marks invisible because they cover up the links
for line in axis.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
axis.set_ylim([dvw, 0])
axis.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
axis.set_xticks([])
axis.set_xticklabels([])
else:
axis.set_xticks(ivticks)
axis.set_xticklabels(ivl)
lbls=axis.get_xticklabels()
if leaf_rotation:
matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation)
else:
matplotlib.pylab.setp(lbls, 'rotation', float(_get_tick_rotation(p)))
if leaf_font_size:
matplotlib.pylab.setp(lbls, 'size', leaf_font_size)
else:
matplotlib.pylab.setp(lbls, 'size', float(_get_tick_text_size(p)))
axis.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in axis.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
axis.set_xlim([0, dvw])
axis.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
axis.set_yticks([])
axis.set_yticklabels([])
else:
axis.set_yticks(ivticks)
axis.set_yticklabels(ivl)
lbls=axis.get_yticklabels()
if leaf_rotation:
matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation)
if leaf_font_size:
matplotlib.pylab.setp(lbls, 'size', leaf_font_size)
axis.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in axis.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
axis.set_xlim([dvw, 0])
axis.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
axis.set_yticks([])
axis.set_yticklabels([])
else:
axis.set_yticks(ivticks)
axis.set_yticklabels(ivl)
lbls=axis.get_yticklabels()
if leaf_rotation:
matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation)
if leaf_font_size:
matplotlib.pylab.setp(lbls, 'size', leaf_font_size)
axis.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in axis.get_yticklines():
line.set_visible(False)
# Let's use collections instead. This way there is a separate legend item for each
# tree grouping, rather than one for each line segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline,yline,color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(zip(xline, yline))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color], colors=(color,))
colors_to_collections[color] = coll
# Add all the non-blue link groupings, i.e. those groupings below the color threshold.
for color in colors_used:
if color != 'b':
axis.add_collection(colors_to_collections[color])
# If there is a blue grouping (i.e., links above the color threshold),
# it should go last.
if 'b' in colors_to_collections:
axis.add_collection(colors_to_collections['b'])
if contraction_marks is not None:
#xs=[x for (x, y) in contraction_marks]
#ys=[y for (x, y) in contraction_marks]
if orientation in ('left', 'right'):
for (x,y) in contraction_marks:
e=matplotlib.patches.Ellipse((y, x), width=dvw/100, height=1.0)
axis.add_artist(e)
e.set_clip_box(axis.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x,y) in contraction_marks:
e=matplotlib.patches.Ellipse((x, y), width=1.0, height=dvw/100)
axis.add_artist(e)
e.set_clip_box(axis.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
#matplotlib.pylab.plot(xs, ys, 'go', markeredgecolor='k', markersize=3)
#matplotlib.pylab.plot(ys, xs, 'go', markeredgecolor='k', markersize=3)
matplotlib.pylab.draw_if_interactive()
except ImportError:
_mpl = False
def _plot_dendrogram(*args, **kwargs):
raise ImportError('matplotlib not available. Plot request denied.')
_link_line_colors=['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Changes the list of matplotlib color codes to use when coloring
links with the dendrogram color_threshold feature.
:Arguments:
- palette : A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
"""
if type(palette) not in (types.ListType, types.TupleType):
raise TypeError("palette must be a list or tuple")
_ptypes = [type(p) == types.StringType for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
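# Example sketch (illustrative only): cycle below-threshold links through two
# colors only.
#
#   set_link_color_palette(['m', 'c'])
#   dendrogram(Z, color_threshold=0.7 * max(Z[:, 2]))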
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None):
r"""
Plots the hierarchical clustering defined by the linkage Z as a
dendrogram. The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
:Arguments:
- Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
- truncate_mode : string
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
* None/'none': no truncation is performed (Default)
* 'lastp': the last ``p`` non-singleton clusters formed in the
linkage are the only non-leaf nodes in the linkage; they
correspond to rows ``Z[n-p-2:end]`` in ``Z``. All other
non-singleton clusters are contracted into leaf nodes.
* 'mlab': This corresponds to MATLAB(TM) behavior. (not
implemented yet)
* 'level'/'mtica': no more than ``p`` levels of the
dendrogram tree are displayed. This corresponds to
Mathematica(TM) behavior.
- p : int
The ``p`` parameter for ``truncate_mode``.
- color_threshold : double
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is ``None`` or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
- get_leaves : bool
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
:math:`j` appears in the :math:`i` th position in the
left-to-right traversal of the leaves, where :math:`j < 2n-1`
and :math:`i < n`.
- orientation : string
The direction to plot the dendrogram, which can be any
of the following strings
* 'top': plots the root at the top, and plot descendent
links going downwards. (default).
* 'bottom': plots the root at the bottom, and plot descendent
links going upwards.
* 'left': plots the root at the left, and plot descendent
links going right.
* 'right': plots the root at the right, and plot descendent
links going left.
- labels : ndarray
By default ``labels`` is ``None`` so the index of the
original observation is used to label the leaf nodes.
Otherwise, this is an :math:`n` -sized list (or tuple). The
``labels[i]`` value is the text to put under the :math:`i` th
leaf node only if it corresponds to an original observation
and not a non-singleton cluster.
- count_sort : string/bool
For each node n, the order (visually, from left-to-right) in
which n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
* False: nothing is done.
* 'ascending'/True: the child with the minimum number of
original objects in its cluster is plotted first.
* 'descending': the child with the maximum number of
original objects in its cluster is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be
``True``.
- distance_sort : string/bool
For each node n, the order (visually, from left-to-right) in
which n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
* False: nothing is done.
* 'ascending'/True: the child with the minimum distance
between its direct descendents is plotted first.
* 'descending': the child with the maximum distance
between its direct descendents is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be
``True``.
- show_leaf_counts : bool
When ``True``, leaf nodes representing :math:`k>1` original
observations are labeled with the number of observations they
contain in parentheses.
- no_plot : bool
When ``True``, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
- no_labels : bool
When ``True``, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
- leaf_rotation : double
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram. (Default=0)
- leaf_font_size : int
Specifies the font size (in points) of the leaf labels. When
unspecified, the size is based on the number of nodes in the
dendrogram.
- leaf_label_func : lambda or function
When leaf_label_func is a callable function, it is called
with the cluster index :math:`k < 2n-1` of each leaf. The
function is expected to return a string with the label for
that leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do::
# First define the leaf label function.
def llf(id):
if id < n:
return str(id)
else:
return '[%d %d %1.2f]' % (id, Z[id-n, 3], R[id-n, 3])
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
- show_contracted : bool
When ``True`` the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
- link_color_func : lambda/function
When a callable function, link_color_func is called with each
non-singleton id corresponding to each U-shaped link it will paint. The
function is expected to return the color to paint the link,
encoded as a matplotlib color string code.
For example::
dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
:Returns:
- R : dict
A dictionary of data structures computed to render the
dendrogram. Its has the following keys:
- 'icoords': a list of lists ``[I1, I2, ..., Ip]`` where
``Ik`` is a list of 4 independent variable coordinates
corresponding to the line that represents the k'th link
painted.
- 'dcoords': a list of lists ``[D1, D2, ..., Dp]`` where
``Dk`` is a list of 4 dependent variable coordinates
corresponding to the line that represents the k'th link
painted.
- 'ivl': a list of labels corresponding to the leaf nodes.
- 'leaves': for each i, ``H[i] == j``, cluster node
:math:`j` appears in the :math:`i` th position in the
left-to-right traversal of the leaves, where :math:`j < 2n-1`
and :math:`i < n`. If :math:`j` is less than :math:`n`, the
:math:`i` th leaf node corresponds to an original
observation. Otherwise, it corresponds to a non-singleton
cluster.
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (types.IntType, types.FloatType):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list=[]
dcoord_list=[]
color_list=[]
current_color=[0]
currently_below_threshold=[False]
if no_leaves:
ivl=None
else:
ivl=[]
if color_threshold is None or \
(type(color_threshold) == types.StringType and color_threshold=='default'):
color_threshold = max(Z[:,2])*0.7
R={'icoord':icoord_list, 'dcoord':dcoord_list, 'ivl':ivl, 'leaves':lvs,
'color_list':color_list}
props = {'cbt': False, 'cc':0}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(Z=Z, p=p,
truncate_mode=truncate_mode, \
color_threshold=color_threshold, \
get_leaves=get_leaves, \
orientation=orientation, \
labels=labels, \
count_sort=count_sort, \
distance_sort=distance_sort, \
show_leaf_counts=show_leaf_counts, \
i=2*n-2, iv=0.0, ivl=ivl, n=n, \
icoord_list=icoord_list, \
dcoord_list=dcoord_list, lvs=lvs, \
current_color=current_color, \
color_list=color_list, \
currently_below_threshold=currently_below_threshold, \
leaf_label_func=leaf_label_func, \
contraction_marks=contraction_marks, \
link_color_func=link_color_func)
if not no_plot:
mh = max(Z[:,2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation, no_labels, color_list, leaf_font_size=leaf_font_size, leaf_rotation=leaf_rotation, contraction_marks=contraction_marks)
return R
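# Example sketch (illustrative only): a truncated dendrogram with rotated,
# smaller leaf labels. Assumes matplotlib is available and Z is a linkage
# matrix computed with the linkage function above.
#
#   R = dendrogram(Z, truncate_mode='lastp', p=12, leaf_rotation=90,
#                  leaf_font_size=8, show_contracted=True)
#   matplotlib.pylab.show()
#   # R['ivl'] holds the leaf labels, R['leaves'] the leaf node ids.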
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i-n)])
else:
# Otherwise, use the id as the label for the leaf.x
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i-n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, Z[i-n, 0], n, contraction_marks)
_append_contraction_marks_sub(Z, iv, Z[i-n, 1], n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if (i >= n):
contraction_marks.append((iv, Z[i-n, 2]))
_append_contraction_marks_sub(Z, iv, Z[i-n, 0], n, contraction_marks)
_append_contraction_marks_sub(Z, iv, Z[i-n, 1], n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode, \
color_threshold=np.inf, get_leaves=True, \
orientation='top', labels=None, \
count_sort=False, distance_sort=False, \
show_leaf_counts=False, i=-1, iv=0.0, \
ivl=[], n=0, icoord_list=[], dcoord_list=[], \
lvs=None, mhr=False, \
current_color=[], color_list=[], \
currently_below_threshold=[], \
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None):
"""
Calculates the endpoints of the links as well as the labels for
the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns a tuple (left, w, h, md)
* left is the independent variable coordinate of the center of
the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
# If the node is a leaf node but corresponds to a non-singleton cluster,
# its label is either the empty string or the number of original
# observations belonging to cluster i.
if i < 2*n-p and i >= n:
d = Z[i-n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i-n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = Z[i-n, 0]
ab = Z[i-n, 1]
if aa > n:
# The number of singletons below cluster a
na = Z[aa-n, 3]
# The distance between a's two direct children.
da = Z[aa-n, 2]
else:
na = 1
da = 0.0
if ab > n:
nb = Z[ab-n, 3]
db = Z[ab-n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# The distance of the cluster to draw to the left (ua) is uad
# and its count is uan. Likewise, the cluster to draw to the
# right has distance ubd and count ubn.
if ua < n:
uad = 0.0
uan = 1
else:
uad = Z[ua-n, 2]
uan = Z[ua-n, 3]
if ub < n:
ubd = 0.0
ubn = 1
else:
ubd = Z[ub-n, 2]
ubn = Z[ub-n, 3]
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(Z=Z, p=p, \
truncate_mode=truncate_mode, \
color_threshold=color_threshold, \
get_leaves=get_leaves, \
orientation=orientation, \
labels=labels, \
count_sort=count_sort, \
distance_sort=distance_sort, \
show_leaf_counts=show_leaf_counts, \
i=ua, iv=iv, ivl=ivl, n=n, \
icoord_list=icoord_list, \
dcoord_list=dcoord_list, lvs=lvs, \
current_color=current_color, \
color_list=color_list, \
currently_below_threshold=currently_below_threshold, \
leaf_label_func=leaf_label_func, \
level=level+1, contraction_marks=contraction_marks, \
link_color_func=link_color_func)
h = Z[i-n, 2]
if h >= color_threshold or color_threshold <= 0:
c = 'b'
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(Z=Z, p=p, \
truncate_mode=truncate_mode, \
color_threshold=color_threshold, \
get_leaves=get_leaves, \
orientation=orientation, \
labels=labels, \
count_sort=count_sort, \
distance_sort=distance_sort, \
show_leaf_counts=show_leaf_counts, \
i=ub, iv=iv+uwa, ivl=ivl, n=n, \
icoord_list=icoord_list, \
dcoord_list=dcoord_list, lvs=lvs, \
current_color=current_color, \
color_list=color_list, \
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func, \
level=level+1, contraction_marks=contraction_marks, \
link_color_func=link_color_func)
# The height of clusters a and b
ah = uad
bh = ubd
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if type(v) != types.StringType:
raise TypeError("link_color_func must return a matplotlib color string!")
color_list.append(v)
else:
color_list.append(c)
return ( ((uiva + uivb) / 2), uwa+uwb, h, max_dist)
def is_isomorphic(T1, T2):
r"""
Determines if two different cluster assignments ``T1`` and
``T2`` are equivalent.
:Arguments:
- T1 : ndarray
An assignment of singleton cluster ids to flat cluster
ids.
- T2 : ndarray
An assignment of singleton cluster ids to flat cluster
ids.
:Returns:
- b : boolean
Whether the flat cluster assignments ``T1`` and ``T2`` are
equivalent.
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0,n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
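# Example sketch (illustrative only): two flat cluster labelings that differ
# only by a renaming of the cluster ids are equivalent.
#
#   print is_isomorphic([1, 1, 2, 2, 3], [7, 7, 5, 5, 9])   # True
#   print is_isomorphic([1, 1, 2, 2, 3], [7, 5, 5, 5, 9])   # False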
def maxdists(Z):
r"""
MD = maxdists(Z)
Returns the maximum distance between any cluster for each
non-singleton cluster.
:Arguments:
- Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
:Returns:
- MD : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n-1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy_wrap.get_max_dist_for_each_cluster_wrap(Z, MD, int(n))
return MD
def maxinconsts(Z, R):
r"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
:Arguments:
- Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
- R : ndarray
The inconsistency matrix.
:Returns:
- MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each have a different number of rows.")
MI = np.zeros((n-1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy_wrap.get_max_Rfield_for_each_cluster_wrap(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
r"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
:Arguments:
- Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
- R : ndarray
The inconsistency matrix.
- i : int
The column of ``R`` to use as the statistic.
:Returns:
- MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix ``R`` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not types.IntType:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n-1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy_wrap.get_max_Rfield_for_each_cluster_wrap(Z, R, MR, int(n), i)
return MR
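# Example sketch (illustrative only): thresholding on the maximum
# inconsistency coefficient (column 3 of R) seen below each node, as in the
# 'monocrit' example in the fcluster docstring above.
#
#   R = inconsistent(Z)
#   MR = maxRstat(Z, R, 3)
#   T = fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)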
def leaders(Z, T):
r"""
(L, M) = leaders(Z, T):
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
:Arguments:
- Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
- T : ndarray
The flat cluster assignment vector.
:Returns: (L, M)
- L : ndarray
The leader linkage node id's stored as a k-element 1D
array where :math:`k` is the number of flat clusters found
in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
- M : ndarray
The leader linkage node id's stored as a k-element 1D
array where :math:`k` is the number of flat clusters found
in ``T``. This allows the set of flat cluster ids to be
any arbitrary set of :math:`k` integers.
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy_wrap.leaders_wrap(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError('T is not a valid assignment vector. Error found when examining linkage node %d (< 2n-1).' % s)
return (L, M)
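# Example sketch (illustrative only): recover the tree nodes that lead each
# flat cluster produced by fcluster.
#
#   T = fcluster(Z, t=4, criterion='maxclust')
#   L, M = leaders(Z, T)
#   # L[j] is the linkage node id leading the flat cluster with id M[j].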
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print 'ndid: %d lid: %d lfid: %d rid: %d rfid: %d' % (tr.get_id(),
left.get_id(), lfid, right.get_id(), rfid)
if lfid != rfid:
if lfid != -1:
print 'leader: %d with tag %d' % (left.id, lfid)
if rfid != -1:
print 'leader: %d with tag %d' % (right.id, rfid)
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
|
bsd-3-clause
|
sahmed95/sympy
|
doc/ext/docscrape_sphinx.py
|
52
|
7983
|
import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
# NOTE: the unconditional return below makes the rest of this method
# unreachable, so no signature line is ever emitted.
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
## Lines that are commented out are used to make the
## autosummary:: table. Since SymPy does not use the
## autosummary:: functionality, it is easiest to just comment it
## out.
#autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
#if not self._obj or hasattr(self._obj, param):
# autosum += [" %s%s" % (prefix, param)]
#else:
others.append((param, param_type, desc))
#if autosum:
# out += ['.. autosummary::', ' :toctree:', '']
# out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
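# Example sketch (illustrative only, not part of the original file): render a
# NumPy-style docstring as Sphinx-ready reST.
#
#   import numpy as np
#   doc = get_doc_object(np.mean, config={'use_plots': False})
#   print str(doc)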
|
bsd-3-clause
|
chalkyam/Photometry_Pipeline
|
scripts/getdata2.0.py
|
1
|
1679
|
import numpy as np
import pylab as P
import urllib2
import matplotlib.pyplot as plt
#url='http://www.astro.yale.edu/smarts/glast/tables/3C279.tab'
#open("3c279.dat",'wb').write(urllib2.urlopen(url).read())
jdstart,bjdstart,bmag,berr,vjdstart,vmag,verr,rjdstart,rmag,rerr,jjdstart,jmag,jerr,kjdstart,kmag,kerr = np.genfromtxt('3c279.dat', unpack=True,dtype=float,skip_header=3)
print jdstart.dtype
print bjdstart.dtype
print vjdstart.dtype
print rjdstart.dtype
print jjdstart.dtype
print kjdstart.dtype
print bmag.dtype
print vmag.dtype
print rmag.dtype
print jmag.dtype
print kmag.dtype
print berr.dtype
print verr.dtype
print rerr.dtype
print jerr.dtype
print kerr.dtype
#print bjdstart
#bjdstart = "999."
#if bjdstart == "999.":
#print('ummmmm')
#print bjdstart
# all objects are arrays (numpy.ndarray)
print type(jdstart)
print type(bjdstart)
print type(vjdstart)
print type(rjdstart)
print type(jjdstart)
print type(kjdstart)
print type(bmag)
print type(vmag)
print type(rmag)
print type(jmag)
print type(kmag)
print type(berr)
print type(verr)
print type(rerr)
print type(jerr)
print type(kerr)
#plotting error bars
#plt.errorbar(jdstart,bmag-jmag, bmagerr=berr, jmagerr = jerr)
#bad = '999.0'
#bad2 = '-999.0'
#if bad or bad2 in jdstart or bjdstart or bmag or berr or vjdstart or vmag or verr or rjdstart or rmag or rerr or jjdstart or jmag or jerr or kjdstart or kmag or kerr:
#print "bad"
#else:
#print bmag
#np.float
#print len()
#jdstart = jdstart[595:]
#bmag = bmag[595:]
#jmag = jmag[595:]
#plt.scatter(jdstart,bmag-jmag)
#plt.title("b-j vs. time")
#plt.xlabel("J")
#plt.ylabel("B-J")
#plt.ylim([2.5,5])
#plt.xlim()
#plt.show()
|
mit
|
bgris/ODL_bgris
|
lib/python3.5/site-packages/spyder/config/main.py
|
1
|
30624
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder configuration options
Note: Leave this file free of Qt related imports, so that it can be used to
quickly load a user config file
"""
import os
import sys
import os.path as osp
# Local import
from spyder.config.base import (CHECK_ALL, EXCLUDED_NAMES, get_home_dir,
SUBFOLDER, TEST)
from spyder.config.fonts import BIG, MEDIUM, MONOSPACE, SANS_SERIF
from spyder.config.user import UserConfig
from spyder.config.utils import IMPORT_EXT
from spyder.utils import codeanalysis
#==============================================================================
# Main constants
#==============================================================================
# Find in files exclude patterns
EXCLUDE_PATTERNS = [r'\.pyc$|\.pyo$|\.orig$|\.hg|\.svn|\bbuild\b',
r'\.pyc$|\.pyo$|\.orig$|\.hg|\.svn']
# Extensions that should be visible in Spyder's file/project explorers
SHOW_EXT = ['.py', '.ipynb', '.txt', '.dat', '.pdf', '.png', '.svg']
# Extensions supported by Spyder (Editor or Variable explorer)
USEFUL_EXT = IMPORT_EXT + SHOW_EXT
# Name filters for file/project explorers (excluding files without extension)
NAME_FILTERS = ['README', 'INSTALL', 'LICENSE', 'CHANGELOG'] + \
['*' + _ext for _ext in USEFUL_EXT if _ext]
# Port used to detect if there is a running instance and to communicate with
# it to open external files
OPEN_FILES_PORT = 21128
# OS Specific
WIN = os.name == 'nt'
MAC = sys.platform == 'darwin'
CTRL = "Meta" if MAC else "Ctrl"
# Run cell shortcuts
if sys.platform == 'darwin':
RUN_CELL_SHORTCUT = 'Meta+Return'
else:
RUN_CELL_SHORTCUT = 'Ctrl+Return'
RUN_CELL_AND_ADVANCE_SHORTCUT = 'Shift+Return'
# =============================================================================
# Defaults
# =============================================================================
DEFAULTS = [
('main',
{
'icon_theme': 'spyder 3',
'single_instance': True,
'open_files_port': OPEN_FILES_PORT,
'tear_off_menus': False,
'high_dpi_scaling': False,
'vertical_dockwidget_titlebars': False,
'vertical_tabs': False,
'animated_docks': True,
'prompt_on_exit': False,
'panes_locked': True,
'window/size': (1260, 740),
'window/position': (10, 10),
'window/is_maximized': True,
'window/is_fullscreen': False,
'window/prefs_dialog_size': (745, 411),
'show_status_bar': True,
'memory_usage/enable': True,
'memory_usage/timeout': 2000,
'cpu_usage/enable': False,
'cpu_usage/timeout': 2000,
'use_custom_margin': True,
'custom_margin': 0,
'show_internal_console_if_traceback': True,
'check_updates_on_startup': True,
'toolbars_visible': True,
# Global Spyder fonts
'font/family': MONOSPACE,
'font/size': MEDIUM,
'font/italic': False,
'font/bold': False,
'rich_font/family': SANS_SERIF,
'rich_font/size': BIG,
'rich_font/italic': False,
'rich_font/bold': False,
'cursor/width': 2,
'completion/size': (300, 180),
}),
('quick_layouts',
{
'place_holder': '',
'names': ['Matlab layout', 'Rstudio layout', 'Vertical split', 'Horizontal split'],
'order': ['Matlab layout', 'Rstudio layout', 'Vertical split', 'Horizontal split'],
'active': ['Matlab layout', 'Rstudio layout', 'Vertical split', 'Horizontal split'],
}),
('internal_console',
{
'max_line_count': 300,
'working_dir_history': 30,
'working_dir_adjusttocontents': False,
'wrap': True,
'calltips': True,
'codecompletion/auto': False,
'codecompletion/enter_key': True,
'codecompletion/case_sensitive': True,
'external_editor/path': 'SciTE',
'external_editor/gotoline': '-goto:',
'light_background': True,
}),
('main_interpreter',
{
'default': True,
'custom': False,
'umr/enabled': True,
'umr/verbose': True,
'umr/namelist': [],
}),
('console',
{
'max_line_count': 500,
'wrap': True,
'single_tab': True,
'calltips': True,
'codecompletion/auto': True,
'codecompletion/enter_key': True,
'codecompletion/case_sensitive': True,
'show_elapsed_time': False,
'show_icontext': False,
'monitor/enabled': True,
'qt/api': 'default',
'matplotlib/backend/value': 0,
'light_background': True,
'merge_output_channels': os.name != 'nt',
'colorize_sys_stderr': os.name != 'nt',
'pythonstartup/default': True,
'pythonstartup/custom': False,
'ets_backend': 'qt4'
}),
('ipython_console',
{
'show_banner': True,
'completion_type': 0,
'use_pager': False,
'show_calltips': True,
'ask_before_closing': False,
'buffer_size': 500,
'pylab': True,
'pylab/autoload': False,
'pylab/backend': 0,
'pylab/inline/figure_format': 0,
'pylab/inline/resolution': 72,
'pylab/inline/width': 6,
'pylab/inline/height': 4,
'startup/run_lines': '',
'startup/use_run_file': False,
'startup/run_file': '',
'greedy_completer': False,
'autocall': 0,
'symbolic_math': False,
'in_prompt': '',
'out_prompt': '',
'light_color': True,
'dark_color': False
}),
('variable_explorer',
{
'autorefresh': False,
'autorefresh/timeout': 2000,
'check_all': CHECK_ALL,
'dataframe_format': '.3g', # no percent sign to avoid problems
# with ConfigParser's interpolation
'excluded_names': EXCLUDED_NAMES,
'exclude_private': True,
'exclude_uppercase': True,
'exclude_capitalized': False,
'exclude_unsupported': True,
'truncate': True,
'minmax': False
}),
('editor',
{
'printer_header/font/family': SANS_SERIF,
'printer_header/font/size': MEDIUM,
'printer_header/font/italic': False,
'printer_header/font/bold': False,
'wrap': False,
'wrapflag': True,
'code_analysis/pyflakes': True,
'code_analysis/pep8': False,
'todo_list': True,
'realtime_analysis': True,
'realtime_analysis/timeout': 2500,
'outline_explorer': True,
'line_numbers': True,
'blank_spaces': False,
'edge_line': True,
'edge_line_column': 79,
'toolbox_panel': True,
'calltips': True,
'go_to_definition': True,
'close_parentheses': True,
'close_quotes': False,
'add_colons': True,
'auto_unindent': True,
'indent_chars': '* *',
'tab_stop_width_spaces': 4,
'codecompletion/auto': True,
'codecompletion/enter_key': True,
'codecompletion/case_sensitive': True,
'check_eol_chars': True,
'tab_always_indent': False,
'intelligent_backspace': True,
'highlight_current_line': True,
'highlight_current_cell': True,
'occurrence_highlighting': True,
'occurrence_highlighting/timeout': 1500,
'always_remove_trailing_spaces': False,
'fullpath_sorting': True,
'show_tab_bar': True,
'max_recent_files': 20,
'save_all_before_run': True,
'focus_to_editor': True,
'onsave_analysis': False
}),
('historylog',
{
'enable': True,
'max_entries': 100,
'wrap': True,
'go_to_eof': True,
}),
('help',
{
'enable': True,
'max_history_entries': 20,
'wrap': True,
'connect/editor': False,
'connect/python_console': False,
'connect/ipython_console': False,
'math': True,
'automatic_import': True,
}),
('onlinehelp',
{
'enable': True,
'zoom_factor': .8,
'max_history_entries': 20,
}),
('outline_explorer',
{
'enable': True,
'show_fullpath': False,
'show_all_files': False,
'show_comments': True,
}),
('project_explorer',
{
'name_filters': NAME_FILTERS,
'show_all': True,
'show_hscrollbar': True
}),
('explorer',
{
'enable': True,
'wrap': True,
'name_filters': NAME_FILTERS,
'show_hidden': True,
'show_all': True,
'show_icontext': False,
}),
('find_in_files',
{
'enable': True,
'supported_encodings': ["utf-8", "iso-8859-1", "cp1252"],
'include': '',
'include_regexp': True,
'exclude': EXCLUDE_PATTERNS,
'exclude_regexp': True,
'search_text_regexp': True,
'search_text': [''],
'search_text_samples': [codeanalysis.TASKS_PATTERN],
'in_python_path': False,
'more_options': False,
}),
('workingdir',
{
'editor/open/browse_scriptdir': True,
'editor/open/browse_workdir': False,
'editor/new/browse_scriptdir': False,
'editor/new/browse_workdir': True,
'editor/open/auto_set_to_basedir': False,
'editor/save/auto_set_to_basedir': False,
'working_dir_adjusttocontents': False,
'working_dir_history': 20,
'startup/use_last_directory': True,
}),
('shortcuts',
{
# ---- Global ----
# -- In app/spyder.py
'_/close pane': "Shift+Ctrl+F4",
'_/lock unlock panes': "Shift+Ctrl+F5",
'_/use next layout': "Shift+Alt+PgDown",
'_/use previous layout': "Shift+Alt+PgUp",
'_/preferences': "Ctrl+Alt+Shift+P",
'_/maximize pane': "Ctrl+Alt+Shift+M",
'_/fullscreen mode': "F11",
'_/save current layout': "Shift+Alt+S",
'_/layout preferences': "Shift+Alt+P",
'_/show toolbars': "Alt+Shift+T",
'_/spyder documentation': "F1",
'_/restart': "Shift+Alt+R",
'_/quit': "Ctrl+Q",
# -- In plugins/editor
'_/file switcher': 'Ctrl+P',
'_/symbol finder': 'Ctrl+Alt+P',
'_/debug': "Ctrl+F5",
'_/debug step over': "Ctrl+F10",
'_/debug continue': "Ctrl+F12",
'_/debug step into': "Ctrl+F11",
'_/debug step return': "Ctrl+Shift+F11",
'_/debug exit': "Ctrl+Shift+F12",
'_/run': "F5",
'_/configure': "Ctrl+F6",
'_/re-run last script': "F6",
# -- In plugins/init
'_/switch to help': "Ctrl+Shift+H",
'_/switch to outline_explorer': "Ctrl+Shift+O",
'_/switch to editor': "Ctrl+Shift+E",
'_/switch to historylog': "Ctrl+Shift+L",
'_/switch to onlinehelp': "Ctrl+Shift+D",
'_/switch to project_explorer': "Ctrl+Shift+P",
'_/switch to console': "Ctrl+Shift+C",
'_/switch to ipython_console': "Ctrl+Shift+I",
'_/switch to variable_explorer': "Ctrl+Shift+V",
'_/switch to find_in_files': "Ctrl+Shift+F",
'_/switch to explorer': "Ctrl+Shift+X",
# -- In widgets/findreplace.py
'_/find text': "Ctrl+F",
'_/find next': "F3",
'_/find previous': "Shift+F3",
'_/replace text': "Ctrl+R",
'_/hide find and replace': "Escape",
# ---- Editor ----
# -- In widgets/sourcecode/codeeditor.py
'editor/code completion': CTRL+'+Space',
'editor/duplicate line': "Ctrl+Alt+Up" if WIN else \
"Shift+Alt+Up",
'editor/copy line': "Ctrl+Alt+Down" if WIN else \
"Shift+Alt+Down",
'editor/delete line': 'Ctrl+D',
'editor/transform to uppercase': 'Ctrl+Shift+U',
'editor/transform to lowercase': 'Ctrl+U',
'editor/move line up': "Alt+Up",
'editor/move line down': "Alt+Down",
'editor/go to definition': "Ctrl+G",
'editor/toggle comment': "Ctrl+1",
'editor/blockcomment': "Ctrl+4",
'editor/unblockcomment': "Ctrl+5",
'editor/start of line': "Meta+A",
'editor/end of line': "Meta+E",
'editor/previous line': "Meta+P",
'editor/next line': "Meta+N",
'editor/previous char': "Meta+B",
'editor/next char': "Meta+F",
'editor/previous word': "Meta+Left",
'editor/next word': "Meta+Right",
'editor/kill to line end': "Meta+K",
'editor/kill to line start': "Meta+U",
'editor/yank': 'Meta+Y',
'editor/rotate kill ring': 'Shift+Meta+Y',
'editor/kill previous word': 'Meta+Backspace',
'editor/kill next word': 'Meta+D',
'editor/start of document': 'Ctrl+Up',
'editor/end of document': 'Ctrl+Down',
'editor/undo': 'Ctrl+Z',
'editor/redo': 'Ctrl+Shift+Z',
'editor/cut': 'Ctrl+X',
'editor/copy': 'Ctrl+C',
'editor/paste': 'Ctrl+V',
'editor/delete': 'Delete',
'editor/select all': "Ctrl+A",
# -- In widgets/editor.py
'editor/inspect current object': 'Ctrl+I',
'editor/breakpoint': 'F12',
'editor/conditional breakpoint': 'Shift+F12',
'editor/run selection': "F9",
'editor/go to line': 'Ctrl+L',
'editor/go to previous file': 'Ctrl+Tab',
'editor/go to next file': 'Ctrl+Shift+Tab',
'editor/new file': "Ctrl+N",
'editor/open last closed':"Ctrl+Shift+T",
'editor/open file': "Ctrl+O",
'editor/save file': "Ctrl+S",
'editor/save all': "Ctrl+Alt+S",
'editor/save as': 'Ctrl+Shift+S',
'editor/close all': "Ctrl+Shift+W",
'editor/last edit location': "Ctrl+Alt+Shift+Left",
'editor/previous cursor position': "Ctrl+Alt+Left",
'editor/next cursor position': "Ctrl+Alt+Right",
'editor/zoom in 1': "Ctrl++",
'editor/zoom in 2': "Ctrl+=",
'editor/zoom out': "Ctrl+-",
'editor/zoom reset': "Ctrl+0",
'editor/close file 1': "Ctrl+W",
'editor/close file 2': "Ctrl+F4",
'editor/run cell': RUN_CELL_SHORTCUT,
'editor/run cell and advance': RUN_CELL_AND_ADVANCE_SHORTCUT,
# -- In plugins/editor.py
'editor/show/hide outline': "Ctrl+Alt+O",
# -- In Breakpoints
'_/switch to breakpoints': "Ctrl+Shift+B",
# ---- Consoles (in widgets/shell) ----
'console/inspect current object': "Ctrl+I",
'console/clear shell': "Ctrl+L",
'console/clear line': "Shift+Escape",
# ---- In Pylint ----
'pylint/run analysis': "F8",
# ---- In Profiler ----
'profiler/run profiler': "F10",
# ---- In widgets/ipythonconsole/shell.py ----
'ipython_console/new tab': "Ctrl+T",
'ipython_console/reset namespace': "Ctrl+Alt+R",
'ipython_console/restart kernel': "Ctrl+.",
# ---- In widgets/arraybuider.py ----
'array_builder/enter array inline': "Ctrl+Alt+M",
'array_builder/enter array table': "Ctrl+M",
# ---- In widgets/variableexplorer/aarayeditor.py ----
'variable_explorer/copy': 'Ctrl+C',
}),
('color_schemes',
{
'names': ['emacs', 'idle', 'monokai', 'pydev', 'scintilla',
'spyder', 'spyder/dark', 'zenburn', 'solarized/light',
'solarized/dark'],
'selected': 'spyder',
# ---- Emacs ----
'emacs/name': "Emacs",
# Name Color Bold Italic
'emacs/background': "#000000",
'emacs/currentline': "#2b2b43",
'emacs/currentcell': "#1c1c2d",
'emacs/occurrence': "#abab67",
'emacs/ctrlclick': "#0000ff",
'emacs/sideareas': "#555555",
'emacs/matched_p': "#009800",
'emacs/unmatched_p': "#c80000",
'emacs/normal': ('#ffffff', False, False),
'emacs/keyword': ('#3c51e8', False, False),
'emacs/builtin': ('#900090', False, False),
'emacs/definition': ('#ff8040', True, False),
'emacs/comment': ('#005100', False, False),
'emacs/string': ('#00aa00', False, True),
'emacs/number': ('#800000', False, False),
'emacs/instance': ('#ffffff', False, True),
# ---- IDLE ----
'idle/name': "IDLE",
# Name Color Bold Italic
'idle/background': "#ffffff",
'idle/currentline': "#f2e6f3",
'idle/currentcell': "#feefff",
'idle/occurrence': "#e8f2fe",
'idle/ctrlclick': "#0000ff",
'idle/sideareas': "#efefef",
'idle/matched_p': "#99ff99",
'idle/unmatched_p': "#ff9999",
'idle/normal': ('#000000', False, False),
'idle/keyword': ('#ff7700', True, False),
'idle/builtin': ('#900090', False, False),
'idle/definition': ('#0000ff', False, False),
'idle/comment': ('#dd0000', False, True),
'idle/string': ('#00aa00', False, False),
'idle/number': ('#924900', False, False),
'idle/instance': ('#777777', True, True),
# ---- Monokai ----
'monokai/name': "Monokai",
# Name Color Bold Italic
'monokai/background': "#2a2b24",
'monokai/currentline': "#484848",
'monokai/currentcell': "#3d3d3d",
'monokai/occurrence': "#666666",
'monokai/ctrlclick': "#0000ff",
'monokai/sideareas': "#2a2b24",
'monokai/matched_p': "#688060",
'monokai/unmatched_p': "#bd6e76",
'monokai/normal': ("#ddddda", False, False),
'monokai/keyword': ("#f92672", False, False),
'monokai/builtin': ("#ae81ff", False, False),
'monokai/definition': ("#a6e22e", False, False),
'monokai/comment': ("#75715e", False, True),
'monokai/string': ("#e6db74", False, False),
'monokai/number': ("#ae81ff", False, False),
'monokai/instance': ("#ddddda", False, True),
# ---- Pydev ----
'pydev/name': "Pydev",
# Name Color Bold Italic
'pydev/background': "#ffffff",
'pydev/currentline': "#e8f2fe",
'pydev/currentcell': "#eff8fe",
'pydev/occurrence': "#ffff99",
'pydev/ctrlclick': "#0000ff",
'pydev/sideareas': "#efefef",
'pydev/matched_p': "#99ff99",
              'pydev/unmatched_p': "#ff9999",
'pydev/normal': ('#000000', False, False),
'pydev/keyword': ('#0000ff', False, False),
'pydev/builtin': ('#900090', False, False),
'pydev/definition': ('#000000', True, False),
'pydev/comment': ('#c0c0c0', False, False),
'pydev/string': ('#00aa00', False, True),
'pydev/number': ('#800000', False, False),
'pydev/instance': ('#000000', False, True),
# ---- Scintilla ----
'scintilla/name': "Scintilla",
# Name Color Bold Italic
'scintilla/background': "#ffffff",
'scintilla/currentline': "#e1f0d1",
'scintilla/currentcell': "#edfcdc",
'scintilla/occurrence': "#ffff99",
'scintilla/ctrlclick': "#0000ff",
'scintilla/sideareas': "#efefef",
'scintilla/matched_p': "#99ff99",
'scintilla/unmatched_p': "#ff9999",
'scintilla/normal': ('#000000', False, False),
'scintilla/keyword': ('#00007f', True, False),
'scintilla/builtin': ('#000000', False, False),
'scintilla/definition': ('#007f7f', True, False),
'scintilla/comment': ('#007f00', False, False),
'scintilla/string': ('#7f007f', False, False),
'scintilla/number': ('#007f7f', False, False),
'scintilla/instance': ('#000000', False, True),
# ---- Spyder ----
'spyder/name': "Spyder",
# Name Color Bold Italic
'spyder/background': "#ffffff",
'spyder/currentline': "#f7ecf8",
'spyder/currentcell': "#fdfdde",
'spyder/occurrence': "#ffff99",
'spyder/ctrlclick': "#0000ff",
'spyder/sideareas': "#efefef",
'spyder/matched_p': "#99ff99",
'spyder/unmatched_p': "#ff9999",
'spyder/normal': ('#000000', False, False),
'spyder/keyword': ('#0000ff', False, False),
'spyder/builtin': ('#900090', False, False),
'spyder/definition': ('#000000', True, False),
'spyder/comment': ('#adadad', False, True),
'spyder/string': ('#00aa00', False, False),
'spyder/number': ('#800000', False, False),
'spyder/instance': ('#924900', False, True),
# ---- Spyder/Dark ----
'spyder/dark/name': "Spyder Dark",
# Name Color Bold Italic
'spyder/dark/background': "#131926",
'spyder/dark/currentline': "#2b2b43",
'spyder/dark/currentcell': "#31314e",
'spyder/dark/occurrence': "#abab67",
'spyder/dark/ctrlclick': "#0000ff",
'spyder/dark/sideareas': "#282828",
'spyder/dark/matched_p': "#009800",
'spyder/dark/unmatched_p': "#c80000",
'spyder/dark/normal': ('#ffffff', False, False),
'spyder/dark/keyword': ('#558eff', False, False),
'spyder/dark/builtin': ('#aa00aa', False, False),
'spyder/dark/definition': ('#ffffff', True, False),
'spyder/dark/comment': ('#7f7f7f', False, False),
'spyder/dark/string': ('#11a642', False, True),
'spyder/dark/number': ('#c80000', False, False),
'spyder/dark/instance': ('#be5f00', False, True),
# ---- Zenburn ----
'zenburn/name': "Zenburn",
# Name Color Bold Italic
'zenburn/background': "#3f3f3f",
'zenburn/currentline': "#333333",
'zenburn/currentcell': "#2c2c2c",
'zenburn/occurrence': "#7a738f",
'zenburn/ctrlclick': "#0000ff",
'zenburn/sideareas': "#3f3f3f",
'zenburn/matched_p': "#688060",
'zenburn/unmatched_p': "#bd6e76",
'zenburn/normal': ('#dcdccc', False, False),
'zenburn/keyword': ('#dfaf8f', True, False),
'zenburn/builtin': ('#efef8f', False, False),
'zenburn/definition': ('#efef8f', False, False),
'zenburn/comment': ('#7f9f7f', False, True),
'zenburn/string': ('#cc9393', False, False),
'zenburn/number': ('#8cd0d3', False, False),
'zenburn/instance': ('#dcdccc', False, True),
# ---- Solarized Light ----
'solarized/light/name': "Solarized Light",
# Name Color Bold Italic
'solarized/light/background': '#fdf6e3',
'solarized/light/currentline': '#f5efdB',
'solarized/light/currentcell': '#eee8d5',
              'solarized/light/occurrence': '#839496',
'solarized/light/ctrlclick': '#d33682',
'solarized/light/sideareas': '#eee8d5',
'solarized/light/matched_p': '#586e75',
'solarized/light/unmatched_p': '#dc322f',
'solarized/light/normal': ('#657b83', False, False),
'solarized/light/keyword': ('#859900', False, False),
'solarized/light/builtin': ('#6c71c4', False, False),
'solarized/light/definition': ('#268bd2', True, False),
'solarized/light/comment': ('#93a1a1', False, True),
'solarized/light/string': ('#2aa198', False, False),
'solarized/light/number': ('#cb4b16', False, False),
'solarized/light/instance': ('#b58900', False, True),
# ---- Solarized Dark ----
'solarized/dark/name': "Solarized Dark",
# Name Color Bold Italic
'solarized/dark/background': '#002b36',
'solarized/dark/currentline': '#083f4d',
'solarized/dark/currentcell': '#073642',
              'solarized/dark/occurrence': '#657b83',
'solarized/dark/ctrlclick': '#d33682',
'solarized/dark/sideareas': '#073642',
'solarized/dark/matched_p': '#93a1a1',
'solarized/dark/unmatched_p': '#dc322f',
'solarized/dark/normal': ('#839496', False, False),
'solarized/dark/keyword': ('#859900', False, False),
'solarized/dark/builtin': ('#6c71c4', False, False),
'solarized/dark/definition': ('#268bd2', True, False),
'solarized/dark/comment': ('#586e75', False, True),
'solarized/dark/string': ('#2aa198', False, False),
'solarized/dark/number': ('#cb4b16', False, False),
'solarized/dark/instance': ('#b58900', False, True)
})
]
#==============================================================================
# Config instance
#==============================================================================
# IMPORTANT NOTES:
# 1. If you want to *change* the default value of a current option, you need to
# do a MINOR update in config version, e.g. from 3.0.0 to 3.1.0
# 2. If you want to *remove* options that are no longer needed in our codebase,
# or if you want to *rename* options, then you need to do a MAJOR update in
# version, e.g. from 3.0.0 to 4.0.0
# 3. You don't need to touch this value if you're just adding a new option
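# Illustrative (hypothetical) examples of the rules above, assuming the current
# value below is '32.0.0':
# * changing the default of an existing option, e.g. flipping
#   'check_updates_on_startup' from True to False        -> bump to '32.1.0'
# * removing or renaming an option, e.g. dropping
#   'fullpath_sorting' from the 'editor' section         -> bump to '33.0.0'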
CONF_VERSION = '32.0.0'
# Main configuration instance
try:
CONF = UserConfig('spyder', defaults=DEFAULTS, load=(not TEST),
version=CONF_VERSION, subfolder=SUBFOLDER, backup=True,
raw_mode=True)
except:
CONF = UserConfig('spyder', defaults=DEFAULTS, load=False,
version=CONF_VERSION, subfolder=SUBFOLDER, backup=True,
raw_mode=True)
# Removing old .spyder.ini location:
old_location = osp.join(get_home_dir(), '.spyder.ini')
if osp.isfile(old_location):
os.remove(old_location)
|
gpl-3.0
|
dingocuster/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
241
|
1761
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining ones are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
|
bsd-3-clause
|
jskDr/jamespy_py3
|
jseaborn.py
|
1
|
7302
|
# Python 3 confirmed, Mar 29 2016 (Indent by space)
# this is extension of seaborn by James for machine learning.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# The following libraries are designed by James
import jgrid
def set_pdi_d(pdi_d, method, grid_scores_):
pdi_d[method] = pd.DataFrame()
#print type( val.cv_validation_scores)
for val in grid_scores_:
r2_a = val.cv_validation_scores
pdx = pd.DataFrame()
pdx["Method"] = [method]
pdx["alpha"] = [val.parameters["alpha"]]
pdx["mean(r2)"] = [np.mean(r2_a)]
pdx["std(r2)"] = [np.std(r2_a)]
pdx["r2_a"] = [r2_a]
pdi_d[method] = pdi_d[method].append(pdx, ignore_index=True)
return pdi_d
def set_pdi_d_full(pdi_d, method, xM_l, yV):
xM = np.concatenate(xM_l, axis = 1)
gs = jgrid.gs_Ridge(xM, yV, (-3, 2, 10), n_folds=20)
# gs.grid_scores_
set_pdi_d(pdi_d, method, gs.grid_scores_)
pdi_d[ method].plot(kind ='line', x='alpha', y='mean(r2)', yerr='std(r2)', logx=True)
plt.ylabel(r"E[$r^2$]")
return pdi_d[method]
def _pdi_gs_r0(method, grid_scores_, expension=False):
pdi = pd.DataFrame()
#print type( val.cv_validation_scores)
for val in grid_scores_:
r2_a = val.cv_validation_scores
pdx = pd.DataFrame()
if expension:
pdx["Method"] = [ method] * r2_a.shape[0]
pdx["alpha"] = [ val.parameters["alpha"]] * r2_a.shape[0]
pdx["unit"] = list(range( r2_a.shape[0]))
pdx["r2"] = r2_a
else:
pdx["Method"] = [ method]
pdx["alpha"] = [ val.parameters["alpha"]]
pdx["mean(r2)"] = [np.mean( r2_a)]
pdx["std(r2)"] = [np.std( r2_a)]
pdx["r2_a"] = [r2_a]
pdi = pdi.append( pdx, ignore_index = True)
return pdi
def _pdi_gs_full_r0( method, xM_l, yV, expension = False):
xM = np.concatenate( xM_l, axis = 1)
gs = jgrid.gs_Ridge( xM, yV, (-3, 2, 10), n_folds=20)
# gs.grid_scores_
if expension:
pdi = pdi_gs( method, gs.grid_scores_, expension = expension)
else:
        pdi = pdi_gs( method, gs.grid_scores_)
pdi.plot( kind ='line', x = 'alpha', y = 'mean(r2)', yerr = 'std(r2)', logx = True)
plt.ylabel( r"E[$r^2$]")
return pdi
def pdi_gs( method, grid_scores_, expension = False):
pdi = pd.DataFrame()
#print type( val.cv_validation_scores)
for val in grid_scores_:
r2_a = val.cv_validation_scores
pdx = pd.DataFrame()
if expension:
pdx["Method"] = [ method] * r2_a.shape[0]
pdx["alpha"] = [ val.parameters["alpha"]] * r2_a.shape[0]
pdx["unit"] = list(range( r2_a.shape[0]))
pdx["r2"] = r2_a
else:
pdx["Method"] = [ method]
pdx["alpha"] = [ val.parameters["alpha"]]
pdx["mean(r2)"] = [np.mean( r2_a)]
pdx["std(r2)"] = [np.std( r2_a)]
pdx["r2_a"] = [r2_a]
pdi = pdi.append( pdx, ignore_index = True)
return pdi
def _pdi_gs_full_r0( method, xM_l, yV, X_concat = None, mode = "Ridge", expension = False, n_folds=20):
if mode == "Ridge":
xM = np.concatenate( xM_l, axis = 1)
gs = jgrid.gs_Ridge( xM, yV, (-3, 2, 12), n_folds=n_folds)
elif mode == "BIKE_Ridge":
# print "BIKE_Ridge mode is working now."
A_l = xM_l
gs = jgrid.gs_BIKE_Ridge( A_l, yV, alphas_log=(-3, 2, 12), X_concat = X_concat, n_folds=n_folds)
else:
print("Mode {} is not supported.".format( mode))
# gs.grid_scores_
if expension:
pdi = pdi_gs( method, gs.grid_scores_, expension = expension)
else:
pdi = pdi_gs( method, gs.grid_scores_)
pdi.plot( kind ='line', x = 'alpha', y = 'mean(r2)', yerr = 'std(r2)', logx = True)
plt.ylabel( r"E[$r^2$]")
return pdi
def _pdi_gs_full_r1( method, xM_l, yV, X_concat = None, mode = "Ridge", expension = False,
n_folds=20, alphas_log=(-3, 2, (2-(-3))*2+1)):
if mode == "Ridge":
xM = np.concatenate( xM_l, axis = 1)
gs = jgrid.gs_Ridge( xM, yV, alphas_log, n_folds=n_folds)
elif mode == "BIKE_Ridge":
# print "BIKE_Ridge mode is working now."
A_l = xM_l
gs = jgrid.gs_BIKE_Ridge( A_l, yV, alphas_log=alphas_log, X_concat = X_concat, n_folds=n_folds)
else:
print("Mode {} is not supported.".format( mode))
# gs.grid_scores_
if expension:
pdi = pdi_gs( method, gs.grid_scores_, expension = expension)
else:
pdi = pdi_gs( method, gs.grid_scores_)
pdi.plot( kind ='line', x = 'alpha', y = 'mean(r2)', yerr = 'std(r2)', logx = True)
plt.ylabel( r"E[$r^2$]")
return pdi
def pdi_gs_full( method, xM_l, yV, X_concat = None, mode = "Ridge", expension = False,
n_folds=20, alphas_log=(-3, 2, (2-(-3))*2+1), n_jobs = 1, scoring = 'r2'):
if mode == "Ridge":
xM = np.concatenate( xM_l, axis = 1)
gs = jgrid.gs_Ridge( xM, yV, alphas_log, n_folds=n_folds, n_jobs = n_jobs, scoring=scoring)
elif mode == "BIKE_Ridge":
# print "BIKE_Ridge mode is working now."
A_l = xM_l
gs = jgrid.gs_BIKE_Ridge( A_l, yV, alphas_log=alphas_log, X_concat = X_concat,
n_folds=n_folds, n_jobs = n_jobs)
else:
print("Mode {} is not supported.".format( mode))
# gs.grid_scores_
if expension:
pdi = pdi_gs( method, gs.grid_scores_, expension = expension)
else:
pdi = pdi_gs( method, gs.grid_scores_)
pdi.plot( kind ='line', x = 'alpha', y = 'mean(r2)', yerr = 'std(r2)', logx = True)
plt.ylabel( r"E[$r^2$]")
return pdi
def expension_4_boxplot( pdr, method_l, x, y, hue):
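    # Reshapes pdr from wide form (one column per method in method_l) into the
    # long form expected by seaborn.boxplot: one row per (x, method) pair, with
    # the method name stored under `hue` and its value under `y`.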
pdw = pd.DataFrame()
val_l = list()
methods = list()
for m in method_l:
methods.extend( [m] * pdr.shape[0])
val_l.extend( pdr[ m].tolist())
pdw[ hue] = methods
pdw[ x] = pdr[ x].tolist() * len(method_l)
pdw[ y] = val_l
return pdw
def boxplot_expension( pdr, method_l, x="Group", y="RP", hue="Method"):
# method_l = ['No_Regression', 'Mean_Compensation', 'Linear', 'Exp']
val_s = y
pdw = expension_4_boxplot( pdr, method_l, x=x, y=y, hue=hue)
    sns.boxplot(x=x, y=val_s, hue=hue, data=pdw, palette="PRGn")
sns.despine(offset=10, trim=True)
def get_ts_df( N):
"""
2D ndarray to pd.DataFrame
"""
df = pd.DataFrame()
df['value'] = N.reshape(-1)
df['time'] = list(range( N.shape[1])) * N.shape[0]
df['unit'] = np.repeat( range( N.shape[0]), N.shape[1])
return df
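# Illustrative example: for N = np.array([[1, 2, 3], [4, 5, 6]]), get_ts_df(N)
# returns a long-format frame with 'value' = 1..6 (row by row), 'time' = 0,1,2
# repeated per unit and 'unit' = 0,0,0,1,1,1 -- the layout sns.tsplot expects
# in np_tsplot below.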
def np_tsplot( N):
sns.tsplot( get_ts_df( N), time='time', unit='unit', value='value')
def tsplot_clusters( X, y):
"""
X, 2d array with y, cluster index
"""
for yit in list(set(y)):
sns.tsplot( X[y==yit,:], color=plt.cm.rainbow(yit/max(y)))
def show_clusters( y_kmeans, Vorg, title=None):
set_y = set(y_kmeans)
for i in set_y:
c = plt.cm.rainbow_r(i/max(set_y))
sns.tsplot( Vorg[y_kmeans==i,:], color=c)
plt.xlabel('Time')
plt.ylabel('Maganitude')
if title:
plt.title(title)
|
mit
|
argenortega/pgm
|
pgm/nodes/Node.py
|
1
|
2519
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pgm.nodes.node
Node
"""
__author__ = "Argentina Ortega Sainz"
__copyright__ = "Copyright 2015, Argentina Ortega Sainz"
__credits__ = ["Argentina Ortega Sainz"]
__date__ = "July 15, 2015"
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Argentina Ortega Sainz"
__email__ = "[email protected]"
__status__ = "Development"
import pydot
from pandas import DataFrame, MultiIndex
import numpy as np
class Node(object):
def __init__(self, name='Node', parents=None, children=None):
self._name = name
if parents is None:
self._parents = []
else:
self._parents = parents[:]
if children is None:
self._children = []
else:
self._children = children[:]
self._neighbors = []
self._neighbors.extend(self.parents)
self._neighbors.extend(self.children)
def __iter__(self):
return self
def __hash__(self):
return hash(self.name)
@property
def graph(self):
return pydot.Node(name=self._name, shape='ellipse')
def __str__(self):
return self._name
def __repr__(self):
return '<Node %s>' % self._name
@property
def domain(self):
return self._domain
@property
def parents(self):
return self._parents
@property
def children(self):
return self._children
@property
def name(self):
return self._name
@domain.setter
def domain(self, value):
self._domain = value
@parents.setter
def parents(self, value):
# TODO: check probability tables when assigning parents (keep values if there are any)
# TODO: update validate method to only check for right dimensions in table
self._parents = value
@children.setter
def children(self, value):
self._children = value
@name.setter
def name(self, value):
self._name = value
class Table(object):
def __init__(self, node_name):
self._name = node_name
self.m = 1
self.n = 1
@property
def table(self):
return self._table
@table.setter
def table(self, value):
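        # m and n default to 1 in __init__ and are expected to be set to the
        # table's dimensions by the caller before assigning a value here; the
        # incoming array is wrapped in a DataFrame indexed by self.rows and
        # self.cols, which are likewise assumed to be set beforehand.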
assert value.shape == (self.m, self.n)
self._table = DataFrame(value, index=self.rows, columns=self.cols)
#print self._table
def __repr__(self):
return '<Table %s>' % self._name
def __str__(self):
return self._table.to_string()
|
gpl-3.0
|
walterreade/scikit-learn
|
examples/neural_networks/plot_rbm_logistic_classification.py
|
99
|
4608
|
"""
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
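# A minimal, commented-out sketch of such a search (the grid below is
# illustrative only; the original grid is not reproduced here):
#
#   from sklearn.model_selection import GridSearchCV
#   param_grid = {'rbm__learning_rate': [0.01, 0.06, 0.1],
#                 'rbm__n_components': [50, 100, 200],
#                 'logistic__C': [1.0, 100.0, 6000.0]}
#   search = GridSearchCV(classifier, param_grid, n_jobs=-1)
#   search.fit(X_train, Y_train)
#   print(search.best_params_)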
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
NicoliAraujo/rna-precipitacao-manaus
|
PrevisaoTempo/modules/input_data_manipulators/CDMFromTxtToCSV.py
|
1
|
8024
|
# -*- coding: utf-8 -*-
'''
Created on May 2, 2016
@author: Nicoli Araujo
'''
from FromTxtToCSV import FromTxtToCSV  # assuming the FromTxtToCSV module defines a class of the same name
import numpy as np
import pandas as pd
# import seaborn as sns
class CDMFromTxtToCSV(FromTxtToCSV):
    '''
    Works directly on the original ClimateDataMao.txt.
    For each month it generates a file of the form:
    Date AHT ALT ARH WS RAINFALL
    1-10-1970 30.8 21.3 89 1.0 29.5
    2-10-1970 34.2 24.3 79.25 6.8 18.2
    3-10-1970 33.9 23.5 75.25 6.17 7
    4-10-1970 34 24.9 80.25 5.0 0
    5-10-1970 33.4 22.8 79 3.1 3.9
    6-10-1970 34.6 25.9 74.5 4.23 0
    Does not discard the NaNs.
    '''
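    # Example usage (hypothetical input path): constructing the class reads the
    # txt file, builds the DataFrame and exports one CSV per month:
    #     CDMFromTxtToCSV('./Data/ClimateDataMao.txt')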
# def setNewFileData(self, name):
# with open(name, 'w') as file:
def setCollumns(self):
        '''Creates lists with the data extracted from the input file, to be processed later.'''
for line in self.OldFileData:
cont = 0
fim = 0
inicio = 0
for i in line:
fim += 1
if (i == ";"):
cont += 1
if (cont == 2):
self.colDate.append(line[inicio:fim - 1])
elif (cont == 5):
self.colAHT.append(line[inicio:fim - 1])
elif (cont == 6):
self.colALT.append(line[inicio:fim - 1])
elif (cont == 7):
self.colARH.append(line[inicio:fim - 1])
elif (cont == 8):
self.colWS.append(line[inicio:fim - 1])
elif (cont == 4):
self.colRainfall.append(line[inicio:fim - 1])
inicio = fim
def setDateSeparator(self, dateList):
newDateList = []
for line in dateList:
line = str(int(line[:2])) + '-' + str(int(line[3:5])) + '-' + str(int(line[6:]))
newDateList.append(line)
return newDateList
def setDataFrame(self):
self.dataFrame = pd.DataFrame({self.labelList[0] : self.colAHT,
self.labelList[1] : self.colALT,
self.labelList[2] : self.colARH,
self.labelList[3] : self.colWS,
self.labelList[4] : self.colRainfall },
index=self.colDate)
self.dataFrame = self.dataFrame[self.dataFrame.index != '']
self.dataFrame = self.dataFrame.replace(['', ' '], [np.nan, np.nan])
self.dataFrame.index.name = 'Date'
# print(self.dataFrame)
def unifyDates(self):
        '''
        Merges the records belonging to the same day into a single row.
        '''
for i in range(0, len(self.colDate)):
if (self.colDate[i] == self.colDate[i - 1]):
if self.colAHT[i] == '':
self.colAHT[i] = self.colAHT[i - 1]
if self.colALT[i] == '':
self.colALT[i] = self.colALT[i - 1]
if self.colARH[i] == '':
self.colARH[i] = self.colARH[i - 1]
if self.colWS[i] == '':
self.colWS[i] = self.colWS[i - 1]
if self.colRainfall[i] == '':
self.colRainfall[i] = self.colRainfall[i - 1]
self.colDate[i - 1] = ''
def setData(self, path):
        '''
        Reads the data located at path, standardizes it and loads it into a DataFrame.
        '''
self.getOldFileData(path)
self.removeFirstLines(16)
self.setCollumns()
self.truncListNumbers(self.colWS)
self.colDate = self.setDateSeparator(self.colDate)
self.unifyDates()
self.setDataFrame()
def startColumnList(self):
        '''
        Initializes the lists that will hold each data column of the DataFrame.
        '''
self.colDate = []
self.colRainfall = []
self.colWS = []
self.colARH = []
self.colALT = []
self.colAHT = []
def exportMonth(self, month):
        '''
        Exports the data of one month to a CSV file - month is passed as a string from '1' to '12'.
        '''
(start, end) = self.getLimitYears()
dfExport = pd.DataFrame()
filename = './Data/files/monthly/RainfallByDay/' + month + 'd.csv'
lastMonthDay = {'1':'31', '2' : '28', '2B': '29', '3':'31', '4':'30', '5':'31', '6':'30',
'7':'31', '8':'31', '9':'30', '10':'31', '11':'30', '12':'31'}
with open(filename, 'w') as file:
for year in range(start, end + 1):
startDate = '1-' + month + '-' + str(year)
endDate = lastMonthDay[month] + '-' + month + '-' + str(year)
try:
if year == start:
pd.concat([dfExport, self.dataFrame.loc[startDate:endDate, self.labelList]], axis=1).to_csv(file)
else:
pd.concat([dfExport, self.dataFrame.loc[startDate:endDate, self.labelList]], axis=1).to_csv(file, header=False)
except:
if year % 4 == 0 and month == '2':
endDate = lastMonthDay['2B'] + '-' + month + '-' + str(year)
if year == start:
pd.concat([dfExport, self.dataFrame.loc[startDate:endDate, self.labelList]], axis=1).to_csv(file)
else:
pd.concat([dfExport, self.dataFrame.loc[startDate:endDate, self.labelList]], axis=1).to_csv(file, header=False)
def printMonth(self, month):
        '''
        Prints the data of one month.
        '''
(start, end) = self.getLimitYears()
lastMonthDay = {'1':'31', '2' : '28', '2B': '29', '3':'31', '4':'30', '5':'31', '6':'30',
'7':'31', '8':'31', '9':'30', '10':'31', '11':'30', '12':'31'}
for year in range(start, end):
startDate = '1-' + month + '-' + str(year)
endDate = lastMonthDay[month] + '-' + month + '-' + str(year)
try:
print(self.dataFrame.loc[startDate:endDate, self.labelList])
except:
if year % 4 == 0 and month == '2':
endDate = lastMonthDay['2B'] + '-' + month + '-' + str(year)
print(self.dataFrame.loc[startDate:endDate, self.labelList])
def countEmptyData(self, dataList):
        '''
        Counts how many empty entries exist in the given data list and prints the count together with the total.
        '''
cont = cont1 = 0
for i in dataList:
cont1 += 1
if i == "":
cont += 1
print(cont, cont1)
def __init__(self, name):
        '''
        Constructor.
        name: path of the input txt file.
        '''
self.labelList = ['AHT', # average highest temperature
'ALT', # average lowest temperature
'ARH', # average relative humidity
'WS', # wind speed
                          'RAINFALL' # precipitation (rainfall)
]
self.startColumnList()
self.setData(name)
# sns.boxplot(x=months,y="RAINFALL", hue="RAINFALL", data=self.dataFrame,palette="Greys")
        # sns.plt.show()
for i in range (1, 13):
self.exportMonth(str(i))
# self.exportMonth('1')
# self.printMonth('1')
# print(self.dataFrame.loc['23-1-1995',['AHT','ALT', 'URM', 'WS', 'PRECIPITAÇÃO']])
|
gpl-3.0
|
UCL-CS35/incdb-poc
|
venv/share/doc/dipy/examples/linear_fascicle_evaluation.py
|
4
|
11620
|
"""
=================================================
Linear fascicle evaluation (LiFE)
=================================================
Evaluating the results of tractography algorithms is one of the biggest
challenges for diffusion MRI. One proposal for evaluation of tractography
results is to use a forward model that predicts the signal from each of a set of
streamlines, and then fit a linear model to these simultaneous predictions
[Pestilli2014]_.
We will use streamlines generated using probabilistic tracking on CSA
peaks. For brevity, we will include in this example only streamlines going
through the corpus callosum connecting left to right superior frontal
cortex. The process of tracking and finding these streamlines is fully
demonstrated in the `streamline_tools.py` example. If this example has been
run, we can read the streamlines from file. Otherwise, we'll run that example
first, by importing it. This provides us with all of the variables that were
created in that example:
"""
import numpy as np
import os.path as op
import nibabel as nib
import dipy.core.optimize as opt
if not op.exists('lr-superiorfrontal.trk'):
from streamline_tools import *
else:
# We'll need to know where the corpus callosum is from these variables:
from dipy.data import (read_stanford_labels,
fetch_stanford_t1,
read_stanford_t1)
hardi_img, gtab, labels_img = read_stanford_labels()
labels = labels_img.get_data()
cc_slice = labels == 2
fetch_stanford_t1()
t1 = read_stanford_t1()
t1_data = t1.get_data()
data = hardi_img.get_data()
# Read the candidates from file in voxel space:
candidate_sl = [s[0] for s in nib.trackvis.read('lr-superiorfrontal.trk',
points_space='voxel')[0]]
"""
The streamlines that are entered into the model are termed 'candidate
streamlines' (or a 'candidate connectome'):
"""
"""
Let's visualize the initial candidate group of streamlines in 3D, relative to the
anatomical structure of this brain:
"""
from dipy.viz.colormap import line_colors
from dipy.viz import fvtk
candidate_streamlines_actor = fvtk.streamtube(candidate_sl,
line_colors(candidate_sl))
cc_ROI_actor = fvtk.contour(cc_slice, levels=[1], colors=[(1., 1., 0.)],
opacities=[1.])
vol_actor = fvtk.slicer(t1_data)
vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)
# Add display objects to canvas
ren = fvtk.ren()
fvtk.add(ren, candidate_streamlines_actor)
fvtk.add(ren, cc_ROI_actor)
fvtk.add(ren, vol_actor)
fvtk.add(ren, vol_actor2)
fvtk.record(ren, n_frames=1, out_path='life_candidates.png',
size=(800, 800))
"""
.. figure:: life_candidates.png
:align: center
**Candidate connectome before life optimization**
"""
"""
Next, we initialize a LiFE model. We import the `dipy.tracking.life` module,
which contains the classes and functions that implement the model:
"""
import dipy.tracking.life as life
fiber_model = life.FiberModel(gtab)
"""
Since we read the streamlines from a file, already in the voxel space, we do not
need to transform them into this space. Otherwise, if the streamline coordinates
were in the world space (relative to the scanner iso-center, or relative to the
mid-point of the AC-PC-connecting line), we would use this::
inv_affine = np.linalg.inv(hardi_img.get_affine())
the inverse transformation from world space to the voxel space as the affine for
the following model fit.
The next step is to fit the model, producing a `FiberFit` class instance, that
stores the data, as well as the results of the fitting procedure.
The LiFE model posits that the signal in the diffusion MRI volume can be
explained by the streamlines, by the equation
.. math::
y = X\beta
Where $y$ is the diffusion MRI signal, $\beta$ are a set of weights on the
streamlines and $X$ is a design matrix. This matrix has the dimensions $m$ by
$n$, where $m=n_{voxels} \cdot n_{directions}$, and $n_{voxels}$ is the set of
voxels in the ROI that contains the streamlines considered in this model. The
$i^{th}$ column of the matrix contains the expected contributions of the
$i^{th}$ streamline (arbitrarily ordered) to each of the voxels. $X$ is a sparse
matrix, because each streamline traverses only a small percentage of the
voxels. The expected contributions of the streamline are calculated using a
forward model, where each node of the streamline is modeled as a cylindrical
fiber compartment with Gaussian diffusion, using the diffusion tensor model. See
[Pestilli2014]_ for more detail on the model, and variations of this model.
"""
fiber_fit = fiber_model.fit(data, candidate_sl, affine=np.eye(4))
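# A purely descriptive note on the dimensions discussed above: the design
# matrix X is stored (sparse) in ``fiber_fit.life_matrix``, with one row per
# (voxel, diffusion direction) pair in the ROI and one column per candidate
# streamline, so X.dot(beta) yields one value per diffusion-weighted
# measurement (this is the layout relied upon further below):
# print(fiber_fit.life_matrix.shape, fiber_fit.beta.shape, fiber_fit.vox_coords.shape)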
"""
The `FiberFit` class instance holds various properties of the model fit. For
example, it has the weights $\beta$, that are assigned to each streamline. In
most cases, a tractography through some region will include redundant
streamlines, and these streamlines will have $\beta_i$ that are 0.
"""
import matplotlib.pyplot as plt
import matplotlib
fig, ax = plt.subplots(1)
ax.hist(fiber_fit.beta, bins=100, histtype='step')
ax.set_xlabel('Fiber weights')
ax.set_ylabel('# fibers')
fig.savefig('beta_histogram.png')
"""
.. figure:: beta_histogram.png
:align: center
**LiFE streamline weights**
"""
"""
We use $\beta$ to filter out these redundant streamlines, and generate an
optimized group of streamlines:
"""
optimized_sl = list(np.array(candidate_sl)[np.where(fiber_fit.beta>0)[0]])
ren = fvtk.ren()
fvtk.add(ren, fvtk.streamtube(optimized_sl, line_colors(optimized_sl)))
fvtk.add(ren, cc_ROI_actor)
fvtk.add(ren, vol_actor)
fvtk.record(ren, n_frames=1, out_path='life_optimized.png',
size=(800, 800))
"""
.. figure:: life_optimized.png
:align: center
**Streamlines selected via LiFE optimization**
"""
"""
The new set of streamlines should do well in fitting the data, and redundant
streamlines have presumably been removed (in this case, about 50% of the
streamlines).
But how well does the model do in explaining the diffusion data? We can
quantify that: the `FiberFit` class instance has a `predict` method, which can
be used to invert the model and predict back either the data that was used to
fit the model, or other unseen data (e.g. in cross-validation, see
:ref:`kfold_xval`).
Without arguments, the `.predict()` method will predict the diffusion signal
for the same gradient table that was used in the fit data, but `gtab` and `S0`
key-word arguments can be used to predict for other acquisition schemes and
other baseline non-diffusion-weighted signals.
"""
model_predict = fiber_fit.predict()
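# For reference, the same fit could predict an unseen acquisition; a hedged
# sketch (``other_gtab`` and ``other_S0`` are hypothetical placeholders that
# are not defined in this example):
# model_predict_other = fiber_fit.predict(gtab=other_gtab, S0=other_S0)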
"""
We will focus on the error in prediction of the diffusion-weighted data, and
calculate the root of the mean squared error.
"""
model_error = model_predict - fiber_fit.data
model_rmse = np.sqrt(np.mean(model_error[:, 10:] ** 2, -1))
"""
As a baseline against which we can compare, we calculate another error term. In
this case, we assume that the weight for each streamline is equal
to zero. This produces the naive prediction of the mean of the signal in each
voxel.
"""
beta_baseline = np.zeros(fiber_fit.beta.shape[0])
pred_weighted = np.reshape(opt.spdot(fiber_fit.life_matrix, beta_baseline),
(fiber_fit.vox_coords.shape[0],
np.sum(~gtab.b0s_mask)))
mean_pred = np.empty((fiber_fit.vox_coords.shape[0], gtab.bvals.shape[0]))
S0 = fiber_fit.b0_signal
"""
Since the fitting is done in the demeaned S/S0 domain, we need
to add back the mean and then multiply by S0 in every voxel:
"""
mean_pred[..., gtab.b0s_mask] = S0[:, None]
mean_pred[..., ~gtab.b0s_mask] =\
(pred_weighted + fiber_fit.mean_signal[:, None]) * S0[:, None]
mean_error = mean_pred - fiber_fit.data
mean_rmse = np.sqrt(np.mean(mean_error ** 2, -1))
"""
First, we can compare the overall distribution of errors between these two
alternative models of the ROI. We show the distribution of differences in error
(improvement through model fitting, relative to the baseline model). Here,
positive values denote an improvement in error with model fit, relative to
without the model fit.
"""
fig, ax = plt.subplots(1)
ax.hist(mean_rmse - model_rmse, bins=100, histtype='step')
ax.text(0.2, 0.9,'Median RMSE, mean model: %.2f' % np.median(mean_rmse),
horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
ax.text(0.2, 0.8,'Median RMSE, LiFE: %.2f' % np.median(model_rmse),
horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
ax.set_xlabel('RMS Error')
ax.set_ylabel('# voxels')
fig.savefig('error_histograms.png')
"""
.. figure:: error_histograms.png
:align: center
**Improvement in error with fitting of the LiFE model**.
"""
"""
Second, we can show the spatial distribution of the two error terms,
and of the improvement with the model fit:
"""
vol_model = np.ones(data.shape[:3]) * np.nan
vol_model[fiber_fit.vox_coords[:, 0],
fiber_fit.vox_coords[:, 1],
fiber_fit.vox_coords[:, 2]] = model_rmse
vol_mean = np.ones(data.shape[:3]) * np.nan
vol_mean[fiber_fit.vox_coords[:, 0],
fiber_fit.vox_coords[:, 1],
fiber_fit.vox_coords[:, 2]] = mean_rmse
vol_improve = np.ones(data.shape[:3]) * np.nan
vol_improve[fiber_fit.vox_coords[:, 0],
fiber_fit.vox_coords[:, 1],
fiber_fit.vox_coords[:, 2]] = mean_rmse - model_rmse
sl_idx = 49
from mpl_toolkits.axes_grid1 import AxesGrid
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95)
ax = AxesGrid(fig, 111,
nrows_ncols = (1, 3),
label_mode = "1",
share_all = True,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad="5%")
ax[0].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[0].matshow(np.rot90(vol_model[sl_idx, :, :]), cmap=matplotlib.cm.hot)
ax.cbar_axes[0].colorbar(im)
ax[1].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[1].matshow(np.rot90(vol_mean[sl_idx, :, :]), cmap=matplotlib.cm.hot)
ax.cbar_axes[1].colorbar(im)
ax[2].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[2].matshow(np.rot90(vol_improve[sl_idx, :, :]), cmap=matplotlib.cm.RdBu)
ax.cbar_axes[2].colorbar(im)
for lax in ax:
lax.set_xticks([])
lax.set_yticks([])
fig.savefig("spatial_errors.png")
"""
.. figure:: spatial_errors.png
:align: center
**Spatial distribution of error and improvement**
"""
"""
This image demonstrates that in many places, fitting the LiFE model results in
substantial reduction of the error.
Note that for full-brain tractographies *LiFE* can require large amounts of
memory. For detailed memory profiling of the algorithm, based on the
streamlines generated in :ref:`example_probabilistic_fiber_tracking`, see `this
IPython notebook
<http://nbviewer.ipython.org/gist/arokem/bc29f34ebc97510d9def>`_.
For the Matlab implementation of LiFE, head over to `Franco Pestilli's github
webpage <http://francopestilli.github.io/life/>`_.
References
~~~~~~~~~~~~~~~~~~~~~~
.. [Pestilli2014] Pestilli, F., Yeatman, J, Rokem, A. Kay, K. and Wandell
B.A. (2014). Validation and statistical inference in living
connectomes. Nature Methods 11:
1058-1063. doi:10.1038/nmeth.3098
.. include:: ../links_names.inc
"""
|
bsd-2-clause
|
cainiaocome/scikit-learn
|
examples/classification/plot_classifier_comparison.py
|
181
|
4699
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
|
bsd-3-clause
|
victorbergelin/scikit-learn
|
benchmarks/bench_plot_ward.py
|
290
|
1260
|
"""
Benchmark scikit-learn's Ward implementation against SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
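# log-spaced grids: roughly 3 to 1000 samples and 10 to ~3160 features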
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
|
bsd-3-clause
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/sklearn/decomposition/tests/test_fastica.py
|
70
|
7808
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
    # Test Gram-Schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
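    # (a custom ``fun`` for FastICA is expected to return g(x) together with
    # the mean of its derivative g'(x) over the last axis, as g_test does)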
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
mit
|
alexsavio/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
46
|
41270
|
from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.exceptions import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
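    # (equivalently, n_correct is the Mann-Whitney U statistic; dividing by
    # the number of positive/negative pairs below gives the AUC)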
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve starting at 0 and ending at 1,
    # even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no positive sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no negative sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test the precision-recall curve and the area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
# This test was expanded (added scaled_down) in response to github
# issue #3864 (and others), where overly aggressive rounding was causing
# problems for users with very small y_score values
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled_up = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_scaled_down = roc_auc_score(y_true, 1e-6 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled_up)
assert_equal(roc_auc, roc_auc_scaled_down)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled_up = average_precision_score(y_true, 100 * probas_pred)
pr_auc_scaled_down = average_precision_score(y_true, 1e-6 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled_up)
assert_equal(pr_auc, pr_auc_scaled_down)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
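                # with all scores tied, every label ends up with rank n_labels,
                # so each relevant label contributes n_relevant / n_labels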
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works without ties:
    # basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
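                # the relevant label at offset r has rank pos + r + 1 with
                # r + 1 relevant labels at or above it, hence precision
                # (r + 1) / (pos + r + 1); LRAP is the mean over relevant labels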
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account:
        # e.g. two labels tied at rank 1 should both be assigned rank 2.
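        # e.g. for scores [0.5, 0.5, 0.2] the raw ranks are [1, 1, 2]; after
        # the bincount/cumsum correction both 0.5 entries get rank 2 and the
        # 0.2 entry gets rank 3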
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels ranked at or above this one
            # (i.e. with a smaller or equal corrected rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
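    # coverage_error is, averaged over samples, how far down the score-sorted
    # labels one has to go to cover every relevant label (0 if there are none)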
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
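    # label_ranking_loss averages, per sample, the fraction of
    # (relevant, irrelevant) label pairs in which the relevant label is not
    # scored strictly higher (ties count as errors, see the tie tests below)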
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
|
bsd-3-clause
|
kmkolasinski/Quantulaba
|
tests/benchmark_output/plot_Tests.py
|
4
|
1336
|
#!/usr/bin/python
"""
Created on Thu Mar 5 14:16:21 2015
@author: Krzysztof Kolasinski
"""
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
nTest = sys.argv[1]
file = "Test"+nTest+".dat"
print "Plotting test:",file
plt.clf()
data = np.loadtxt(file)
no_lines = np.size(data[0,:])
x = data[:,0]
T_exact = data[:,1]
eps = 10e-15
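# eps is presumably added so exact agreement (zero error) stays visible on the
# logarithmic scale used below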
dT_auto = abs(T_exact-data[:,2]+eps)
dT_ggev = abs(T_exact-data[:,3]+eps)
dT_schur = abs(T_exact-data[:,4]+eps)
#plt.plot(x,T_exact,c='k',ls='-')
#plt.plot(x,data[:,3],c='k',ls='-')
ax = plt.subplot(211)
plt.yscale("log")
plt.plot(x,dT_auto,c='r',ls='-',label="Auto method")
plt.plot(x,dT_ggev,c='k',ls='-',label="GGEV method")
plt.plot(x,dT_schur,c='b',ls='-',label="Schur method")
ax2 = ax.twinx()
ax2.plot(x, T_exact, 'r.')
ax2.set_ylabel('sin', color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax.legend(loc="lower right")
ax.set_ylabel("Error")
plt.ylabel("Num. modes")
ax = plt.subplot(212)
plt.plot(x,data[:,5],c='r',ls='-',label="Auto method")
plt.plot(x,data[:,6],c='k',ls='-',label="GGEV method")
plt.plot(x,data[:,7],c='b',ls='-',label="Schur method")
ax.set_xlabel("Ef [meV]")
ax.set_ylabel("CPU time [s]")
#ax.legend()
plt.savefig(file+".png")
#plt.show()
|
mit
|
baklanovp/pystella
|
obs.py
|
1
|
7748
|
#!/usr/bin/env python3
# #!/usr/bin/python3
import getopt
# matplotlib.use("Agg")
# matplotlib.rcParams['backend'] = "TkAgg"
# matplotlib.rcParams['backend'] = "Qt4Agg"
import math
import os
import sys
from os.path import dirname
import matplotlib.pyplot as plt
from matplotlib import gridspec
import pystella.util.callback as cb
from pystella.rf import band
__author__ = 'bakl'
ROOT_DIRECTORY = dirname(dirname(os.path.abspath(__file__)))
def plot_grid(call, bnames, xlim=None, ylim=None, **kwargs):
title = kwargs.get('title', '')
markersize = kwargs.get('markersize', 9)
# setup figure
plt.matplotlib.rcParams.update({'font.size': kwargs.get('fontsize', 14)})
nrows = math.ceil(len(bnames)/2)
ncols = 1 if len(bnames) == 1 else 2
# if len(bnames) > 1:
fig, axs = plt.subplots(nrows, ncols, sharex='col', sharey='row', figsize=(8, 8))
# else:
# fig, axs = plt.subplots(1, 1, sharex='col', sharey='row', figsize=(8, 8))
plt.subplots_adjust(wspace=0, hspace=0)
for i, bname in enumerate(bnames):
icol = i % ncols
irow = int(i / ncols)
if nrows > 1:
ax = axs[irow, icol]
elif nrows == 1 and ncols > 1:
ax = axs[icol]
else:
ax = axs
if icol == 1:
ax = ax.twinx()
# plot callback
if call is not None:
call.plot(ax, {'bnames': [bname], 'bcolors': {bname: 'black'}, 'markersize': markersize})
# if icol == 0:
ax.set_ylabel('Magnitude')
if irow == int(len(bnames) / 2) - 1:
ax.set_xlabel('Time [days]')
props = dict(facecolor='wheat')
# props = dict(boxstyle='round', facecolor='white')
# props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(.95, .9, bname, horizontalalignment='right', transform=ax.transAxes, bbox=props)
ax.legend(prop={'size': 8}, loc=4)
# if icol == 0:
ax.invert_yaxis()
if xlim is not None:
# xlim = ax.get_xlim()
ax.set_xlim(xlim)
if ylim is not None:
# ylim = ax.get_ylim()
ax.set_ylim(ylim)
if kwargs.get('is_grid', False):
ax.grid(linestyle=':')
if title:
plt.title(title)
return fig
def plot_all(call, bnames, xlim=None, ylim=None, **kwargs):
# xlim = None, ylim = None, is_time_points = False, title = '', bshift = None
title = kwargs.get('title', '')
markersize = kwargs.get('markersize', 9)
# setup figure
plt.matplotlib.rcParams.update({'font.size': 12})
fig = plt.figure(figsize=(12, 12))
# fig = plt.figure(num=None, figsize=(12, 12), dpi=100, facecolor='w', edgecolor='k')
gs1 = gridspec.GridSpec(1, 1)
axUbv = fig.add_subplot(gs1[0, 0])
gs1.update(wspace=0.3, hspace=0.3, left=0.1, right=0.95)
call.plot(axUbv, {'bnames': bnames, 'markersize': markersize})
# finish plot
axUbv.set_ylabel('Magnitude')
axUbv.set_xlabel('Time [days]')
axUbv.minorticks_on()
if xlim is not None:
# xlim = ax.get_xlim()
axUbv.set_xlim(xlim)
axUbv.invert_yaxis()
if ylim is not None:
# ylim = ax.get_ylim()
axUbv.set_ylim(ylim)
axUbv.legend(prop={'size': 8}, loc=4)
# ax.set_title(bset)
if title:
axUbv.set_title(title)
axUbv.grid(linestyle=':')
return fig
def usage():
print("Usage:")
print(" obs.py [params]")
print(" -b <bands:shift>: string, default: U-B-V-R-I.\n"
" shift: to move lc along y-axe (minus is '_', for example -b R:2-V-I:_4-B:5 ")
print(" -c <callback> [lcobs:fname:marker:dt:dm, popov[:R:M:E[FOE]:Mni]]. "
"You can add parameters in format func:params")
print(" -g <single, grid, gridm, gridl> Select plot view. single [default] = all models in one figure"
", grid = for each band separate figure.")
print(" -s without extension. Save plot to pdf-file. Default: if 1, fname = 'ubv_obs.pdf'")
print(" -x <xbeg:xend> - xlim, ex: 0:12. Default: None, used all days.")
print(" -l write plot label")
print(" -h print usage")
print(" --- ")
band.print_bands()
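    # Example invocation (the data file name below is hypothetical):
    #   ./obs.py -c lcobs:obs_sn.dat -b U-B-V-R:_1 -g grid -x 0:120 -s ubv_obs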
def old_lc_wrapper(param, p=None):
a = param.split(':')
fname = a.pop(0)
if p is None:
if os.path.isfile(fname + '.py'):
p, fname = os.path.split(fname)
elif os.path.isfile(os.path.join(os.getcwd(), fname + '.py')):
p = os.getcwd()
else:
p = cb.plugin_path
print("Call: {} from {}".format(fname, p))
c = cb.CallBack(fname, path=p, args=a, load=1)
print("Call: %s from %s" % (c.Func, c.FuncFileFull))
return c
def main():
view_opts = ('single', 'grid', 'gridl', 'gridm')
opt_grid = view_opts[0]
label = None
fsave = None
callback = None
xlim = None
ylim = None
band.Band.load_settings()
try:
opts, args = getopt.getopt(sys.argv[1:], "hqc:g:b:l:s:x:y:")
except getopt.GetoptError as err:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
if len(opts) == 0:
usage()
sys.exit(2)
bnames = None
# bnames = ['U', 'B', 'V', 'R', "I"]
# bands = ['U', 'B', 'V', 'R', "I", 'UVM2', "UVW1", "UVW2", 'g', "r", "i"]
for opt, arg in opts:
if opt == '-b':
bnames = []
for b in str(arg).split('-'):
# extract band shift
if ':' in b:
bname, shift = b.split(':')
bshift = {}
if '_' in shift:
bshift[bname] = -float(shift.replace('_', ''))
else:
bshift[bname] = float(shift)
else:
bname = b
if not band.is_exist(bname):
print('No such band: ' + bname)
sys.exit(2)
bnames.append(bname)
continue
if opt == '-c':
c = cb.lc_wrapper(str(arg))
if callback is not None:
c = cb.CallBackArray((callback, c))
callback = c
continue
if opt == '-g':
opt_grid = str.strip(arg).lower()
if opt_grid not in view_opts:
                print('No such view option: {0}. Can be {1}'
                      .format(opt_grid, '|'.join(view_opts)))
sys.exit(2)
continue
if opt == '-l':
label = str.strip(arg)
continue
if opt == '-s':
fsave = str.strip(arg)
continue
if opt == '-x':
xlim = list(float(x) for x in str(arg).split(':'))
continue
if opt == '-y':
ylim = list(float(x) for x in str(arg).split(':'))
continue
elif opt == '-h':
usage()
sys.exit(2)
if callback is None:
        print('No obs data. You may use lcobs or other callbacks.')
usage()
sys.exit(2)
if opt_grid in view_opts[1:]:
        sep = opt_grid[-1]  # the last character selects the separator style
if sep == 'd':
sep = 'l' # line separator
fig = plot_grid(callback, bnames, xlim=xlim, ylim=ylim, sep=sep, is_grid=False)
else:
fig = plot_all(callback, bnames, xlim=xlim, ylim=ylim, title=label)
plt.show()
# plt.show(block=False)
if fsave is not None:
        if fsave == '1':  # the option value arrives as a string, not an int
fsave = "ubv_obs"
d = os.path.expanduser('~/')
fsave = os.path.join(d, os.path.splitext(fsave)[0]) + '.pdf'
print("Save plot to %s " % fsave)
fig.savefig(fsave, bbox_inches='tight')
if __name__ == '__main__':
main()
|
mit
|
mbayon/TFG-MachineLearning
|
vbig/lib/python2.7/site-packages/pandas/tests/indexes/period/test_indexing.py
|
9
|
12184
|
from datetime import datetime
import pytest
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas.compat import lrange
from pandas._libs import tslib
from pandas import (PeriodIndex, Series, DatetimeIndex,
period_range, Period, _np_version_under1p9)
class TestGetItem(object):
def setup_method(self, method):
pass
def test_getitem(self):
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
for idx in [idx1]:
result = idx[0]
assert result == pd.Period('2011-01-01', freq='D')
result = idx[-1]
assert result == pd.Period('2011-01-31', freq='D')
result = idx[0:5]
expected = pd.period_range('2011-01-01', '2011-01-05', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx[0:10:2]
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',
'2011-01-05',
'2011-01-07', '2011-01-09'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx[-20:-5:3]
expected = pd.PeriodIndex(['2011-01-12', '2011-01-15',
'2011-01-18',
'2011-01-21', '2011-01-24'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx[4::-1]
expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
def test_getitem_index(self):
idx = period_range('2007-01', periods=10, freq='M', name='x')
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(['2007-02', '2007-04', '2007-06'],
freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False,
True, True, False, False, False]]
exp = pd.PeriodIndex(['2007-01', '2007-02', '2007-06', '2007-07'],
freq='M', name='x')
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range('2007-01', periods=50, freq='M')
ts = Series(np.random.randn(len(rng)), rng)
pytest.raises(KeyError, ts.__getitem__, '2006')
result = ts['2008']
assert (result.index.year == 2008).all()
result = ts['2008':'2009']
assert len(result) == 24
result = ts['2008-1':'2009-12']
assert len(result) == 24
result = ts['2008Q1':'2009Q4']
assert len(result) == 24
result = ts[:'2009']
assert len(result) == 36
result = ts['2009':]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
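        # appending the tail slice to itself makes the index labels non-unique,
        # so partial-string slicing on '2008' should now raise KeyError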
tm.assert_raises_regex(KeyError,
"left slice bound for non-unique "
"label: '2008'",
ts.__getitem__, slice('2008', '2009'))
def test_getitem_datetime(self):
rng = period_range(start='2012-01-01', periods=10, freq='W-MON')
ts = Series(lrange(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
assert idx[0] == pd.Period('2011-01', freq='M')
assert idx[1] is tslib.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert (s[pd.Period('2011-01', freq='M')] ==
pd.Period('2011-01', freq='M'))
assert s[pd.NaT] is tslib.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start='2012-01-01', periods=10, freq='D')
ts = Series(lrange(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period('2012-01-02', freq='D')]], exp)
def test_getitem_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S',
periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with pytest.raises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01/01 10:00'], s[3600:3660])
tm.assert_series_equal(s['2013/01/01 9H'], s[:3600])
for d in ['2013/01/01', '2013/01', '2013']:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH 6716
        # Confirm that DatetimeIndex and PeriodIndex work identically
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with pytest.raises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01'], s[0:31])
tm.assert_series_equal(s['2013/02'], s[31:59])
tm.assert_series_equal(s['2014'], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with pytest.raises(KeyError):
s[v]
class TestIndexing(object):
def test_get_loc_msg(self):
idx = period_range('2000-1-1', freq='A', periods=10)
bad_period = Period('2012', 'A')
pytest.raises(KeyError, idx.get_loc, bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03'])
pidx = PeriodIndex(['2011-01-01', 'NaT', '2011-01-03'], freq='M')
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float('nan')) == 1
assert idx.get_loc(np.nan) == 1
def test_take(self):
# GH 10295
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period('2011-01-01', freq='D')
result = idx.take([5])
assert result == pd.Period('2011-01-06', freq='D')
result = idx.take([0, 1, 2])
expected = pd.period_range('2011-01-01', '2011-01-03', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == 'D'
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',
'2011-01-05'], freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(['2011-01-08', '2011-01-05',
'2011-01-02'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx.take([3, 2, 5])
expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx.take([-3, 2, 5])
expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
def test_take_misc(self):
index = PeriodIndex(start='1/1/10', end='12/31/12', freq='D',
name='idx')
expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7),
datetime(2010, 1, 9), datetime(2010, 1, 13)],
freq='D', name='idx')
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH 12631
idx = pd.PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
name='xxx', freq='D')
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', 'NaT'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
|
mit
|
akionakamura/scikit-learn
|
sklearn/qda.py
|
140
|
7682
|
"""
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
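        # For each class k, project the centered samples onto the class's
        # principal axes (rotations_[k]) and rescale by scalings_[k]**-0.5,
        # so that sum(X2 ** 2, 1) below is the squared Mahalanobis distance
        # (x - mean_k)^T Sigma_k^{-1} (x - mean_k). Combined with
        # u_k = log|Sigma_k| and the log prior, this gives the class
        # log-posterior up to an additive constant.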
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
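# Usage sketch continuing the docstring example above (illustrative only; kept
# as a comment so importing this module has no side effects):
#
#     clf = QDA().fit(X, y)
#     # two-class case: decision_function returns one signed score per sample;
#     # a negative value favours clf.classes_[0], consistent with predict()
#     clf.decision_function([[-0.8, -1]])
#     # rows of predict_proba sum to 1 across the classes
#     clf.predict_proba([[-0.8, -1]])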
|
bsd-3-clause
|
tri-state-epscor/wcwave_adaptors
|
vwpy/watershed.py
|
2
|
27496
|
"""
Virtual Watershed Adaptor. Handles fetching and searching of data, model
run initialization, and pushing of data. Does this for associated metadata
as well. Each file that's either taken as input or produced as output gets
associated metadata.
"""
import configparser
import json
import logging
import pandas as pd
import os
import requests
requests.packages.urllib3.disable_warnings()
import subprocess
import urllib
from datetime import datetime, date, timedelta
from jinja2 import Environment, FileSystemLoader
VARNAME_DICT = \
{
'in': ["I_lw", "T_a", "e_a", "u", "T_g", "S_n"],
'em': ["R_n", "H", "L_v_E", "G", "M", "delta_Q", "E_s", "melt",
"ro_predict", "cc_s"],
'snow': ["z_s", "rho", "m_s", "h2o", "T_s_0", "T_s_l", "T_s",
"z_s_l", "h2o_sat"],
'init': ["z", "z_0", "z_s", "rho", "T_s_0", "T_s", "h2o_sat"],
'precip': ["m_pp", "percent_snow", "rho_snow", "T_pp"],
'mask': ["mask"],
'dem': ["alt"]
}
class VWClient:
"""
Client class for interacting with a Virtual Watershed (VW). A VW
is essentially a structured database with certain rules for its
metadata and for uploading or inserting data.
"""
# number of times to re-try an http request
_retry_num = 3
def __init__(self, host_url, uname, passwd):
""" Initialize a new connection to the virtual watershed """
self.host_url = host_url
# Check our credentials
auth_url = host_url + "/apilogin"
self.sesh = requests.session()
l = self.sesh.get(auth_url, auth=(uname, passwd), verify=False)
l.raise_for_status()
self.uname = uname
self.passwd = passwd
# Initialize URLS used by class methods
self.insert_dataset_url = host_url + "/apps/vwp/datasets"
self.data_upload_url = host_url + "/apps/vwp/data"
self.uuid_check_url = host_url + "/apps/vwp/checkmodeluuid"
self.dataset_search_url = \
host_url + "/apps/vwp/search/datasets.json?version=3"
self.modelrun_search_url = \
host_url + "/apps/vwp/search/modelruns.json?version=3"
self.modelrun_delete_url = host_url + "/apps/vwp/deletemodelid"
self.new_run_url = host_url + "/apps/vwp/newmodelrun"
self.gettoken_url = host_url + "/gettoken"
def initialize_modelrun(self, model_run_name=None, description=None,
researcher_name=None, keywords=None):
"""Iniitalize a new model run.
Args:
model_run_name (str): is the name for the new model run
description (str): a description of the new model run
researcher_name (str): contact person for the model run
keywords (str): comma-separated list of keywords associated with
model run
Returns:
            (str) a newly-initialized model_run_uuid
"""
assert description, \
"You must provide a description for your new model run"
assert model_run_name, \
"You must provide a model_run_name for your new model run"
assert researcher_name, \
"You must provide a researcher_name for your new model run"
assert keywords, \
"You must provide keywords for your new model run"
data = {'model_run_name': model_run_name,
'description': description,
'model_keywords': keywords,
'researcher_name': researcher_name}
# TODO make this class-level
auth = (self.uname, self.passwd)
result = self.sesh.post(self.new_run_url, data=json.dumps(data),
auth=auth, verify=False)
result.raise_for_status()
model_run_uuid = result.text
return model_run_uuid
def modelrun_search(self, **kwargs):
"""
Get a list of model runs in the database. Currently no actual "search"
(see, e.g. dataset_search) is available from the Virtual Watershed
Data API.
Returns:
(QueryResult) A query result, containing total records matching,
the number of results returned (subtotal), and the records
themselves, which is a list of dict.
"""
full_url = _build_query(self.modelrun_search_url, **kwargs)
r = self.sesh.get(full_url, verify=False)
return QueryResult(r.json())
def dataset_search(self, **kwargs):
"""
Search the VW for JSON metadata records with matching parameters.
Use key, value pairs as specified in the `Virtual Watershed
Documentation
<http://vwp-dev.unm.edu/docs/stable/search.html#search-objects>`_
Returns:
(QueryResult) A query result, containing total records matching,
the number of results returned (subtotal), and the records
themselves, which is a list of dict.
"""
full_url = _build_query(self.dataset_search_url, **kwargs)
r = self.sesh.get(full_url, verify=False)
return QueryResult(r.json())
def download(self, url, out_file):
"""
Download a file from the VW using url to out_file on local disk
Returns:
None
Raises:
AssertionError: assert that the status code from downloading is 200
"""
data = urllib.urlopen(url)
assert data.getcode() == 200, "Download Failed!"
with file(out_file, 'w+') as out:
out.writelines(data.readlines())
return None
def insert_metadata(self, watershed_metadata):
""" Insert metadata to the virtual watershed. The data that gets
uploaded is the FGDC XML metadata.
Returns:
(requests.Response) Returned so that the user may inspect
the response.
"""
num_tries = 0
while num_tries < self._retry_num:
try:
result = self.sesh.put(self.insert_dataset_url,
data=watershed_metadata,
auth=(self.uname, self.passwd),
verify=False)
logging.debug(result.content)
result.raise_for_status()
return result
except requests.HTTPError:
num_tries += 1
continue
return result
def upload(self, model_run_uuid, data_file_path):
"""
Upload data for a given model_run_uuid to the VW
Returns:
None
Raises:
requests.HTTPError: if the file cannot be successfully uploaded
"""
# currently 'name' is unused
dataPayload = {'name': os.path.basename(data_file_path),
'modelid': model_run_uuid}
num_tries = 0
while num_tries < self._retry_num:
try:
result = \
self.sesh.post(self.data_upload_url, data=dataPayload,
files={'file': open(data_file_path, 'rb')},
auth=(self.uname, self.passwd), verify=False)
result.raise_for_status()
return result
except requests.HTTPError:
num_tries += 1
continue
raise requests.HTTPError()
def swift_upload(self, model_run_uuid, data_file_path):
"""
Use the Swift client from openstack to upload data.
(http://docs.openstack.org/cli-reference/content/swiftclient_commands.html)
Seems to outperform 'native' watershed uploads via HTTP.
Returns:
None
Raises:
requests.HTTPError if the file cannot be successfully uploaded
"""
_swift_upload_url = self.host_url + '/apps/vwp/swiftdata'
segmentsize = 1073741824 # 1 Gig
token_resp = self.sesh.get(self.gettoken_url).text
token = json.loads(token_resp)
preauth_url = token['preauthurl']
preauth_token = token['preauthtoken']
# the container name is model_run_uuid
command = ['swift', 'upload', model_run_uuid, '-S', str(segmentsize),
data_file_path, '--os-storage-url=' + preauth_url,
'--os-auth-token=' + preauth_token]
output = subprocess.check_output(command)
# after we upload to a swift directory we ask VW to download
vw_dl_params = {'modelid': model_run_uuid, 'filename': data_file_path,
'preauthurl': preauth_url,
'preauthtoken': preauth_token}
res = self.sesh.get(_swift_upload_url, params=vw_dl_params)
if res.status_code != 200:
raise requests.HTTPError(
"Swift Upload Failed! Server response:\n" + res.text)
return output
def delete_modelrun(self, model_run_uuid):
"""
Delete a model run associated with model_run_uuid
Returns:
(bool) True if successful, False if not
"""
full_url = self.modelrun_delete_url + model_run_uuid
result = self.sesh.delete(self.modelrun_delete_url,
data=json.dumps({'model_uuid': model_run_uuid}), verify=False)
if result.status_code == 200:
return True
else:
return False
def create_new_user(self, userid, first_name, last_name, email, password,
address1, address2, city, state, zipcode, tel_voice,
country='USA'):
"""
Create a new virtual watershed user. This is only available to users
        with admin status on the virtual watershed.
Returns:
            (bool) True if successful, False if not
"""
pass
def _build_query(search_route, **kwargs):
"build the end of a query by translating dict to key1=val1&key2=val2..."
full_url = search_route
for key, val in kwargs.iteritems():
if type(val) is not str:
val = str(val)
full_url += "&%s=%s" % (key, val)
return full_url
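# Example of the translation performed by _build_query (illustrative; the host
# name is hypothetical):
#
#     _build_query("https://vw.example.org/apps/vwp/search/datasets.json?version=3",
#                  model_run_uuid="abc123")
#     # -> ".../search/datasets.json?version=3&model_run_uuid=abc123"
#
# The search routes built in VWClient.__init__ already end in "?version=3", so
# appending further "&key=value" pairs yields a valid query string.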
class QueryResult:
"""
    Represents the response from the VW Data API search methods, which gives three fields,
'total', 'subtotal', and 'records', represented by the properties explained
in their own docstrings.
"""
def __init__(self, json):
#: raw json returned from the VW
self.json = json
#: total results available on VW
self.total = 0
#: total results returned to client with the query
self.subtotal = 0
#: a list of the results
self.records = json['results']
if 'total' in json:
self.total = int(json['total'])
else:
self.total = len(json['results'])
if 'subtotal' in json:
self.subtotal = int(json['subtotal'])
else:
self.subtotal = len(json['results'])
def default_vw_client(config_file=None):
""" Use the credentials in config_file to initialize a new VWClient instance
Returns: VWClient connected to the ip address given in config_file
"""
if not config_file:
config_file = os.path.join(os.path.dirname(__file__),
'..', 'default.conf')
config = _get_config(config_file)
conn = config['Connection']
return VWClient(conn['watershed_url'], conn['user'], conn['pass'])
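# Minimal usage sketch (assumes a default.conf with valid credentials; the
# model_run_uuid value is hypothetical). Keyword arguments to dataset_search
# are passed through verbatim as key=value search parameters:
#
#     vw_client = default_vw_client()
#     result = vw_client.dataset_search(model_run_uuid='some-model-run-uuid')
#     print(result.total, result.subtotal)
#     for record in result.records:
#         pass  # each record is a dict of VW JSON metadata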
def _get_config(config_file=None):
"""Provide user with a ConfigParser that has read the `config_file`
Returns:
(ConfigParser) Config parser with three sections: 'Common',
'FGDC Metadata', and 'Watershed Metadata'
"""
if config_file is None:
config_file = \
os.path.join(os.path.dirname(__file__), '../default.conf')
assert os.path.isfile(config_file), "Config file %s does not exist!" \
% os.path.abspath(config_file)
config = configparser.ConfigParser()
config.read(config_file)
return config
def metadata_from_file(input_file, parent_model_run_uuid, model_run_uuid,
description, watershed_name, state, start_datetime=None,
end_datetime=None, model_name=None, fgdc_metadata=None,
model_set_type=None, model_set_taxonomy=None,
taxonomy=None, water_year_start=2010,
water_year_end=2011, config_file=None, dt=None,
model_set=None, model_vars=None, file_ext=None,
**kwargs):
"""
Generate metadata for input_file.
Arguments:
**kwargs: Set union of kwargs from make_fgdc_metadata and
make_watershed_metadata
Returns:
(str) watershed metadata
"""
assert dt is None or issubclass(type(dt), timedelta)
dt_multiplier = 1 # default if nothing else is known
if config_file:
config = _get_config(config_file)
else:
config = _get_config(
os.path.join(os.path.dirname(__file__), '../default.conf'))
input_basename = os.path.basename(input_file)
input_split = input_basename.split('.')
input_prefix = input_split[0]
if not file_ext:
file_ext = input_file.split('.')[-1]
if not model_set:
model_set = ("outputs", "inputs")[input_prefix == "in"]
start_datetime_str = ""
end_datetime_str = ""
is_ipw = False
try:
# the number on the end of an isnobal file is the time index
dt_multiplier = int(input_split[1])
is_ipw = True
except (ValueError, IndexError):
pass
if file_ext == 'tif':
model_set_type = 'vis'
kwargs['taxonomy'] = 'geoimage'
kwargs['mimetype'] = 'application/x-zip-compressed'
elif file_ext == 'asc':
file_ext = 'ascii'
model_set_type = 'file'
model_set_taxonomy = 'file'
kwargs['taxonomy'] = 'file'
elif file_ext == 'xlsx':
model_set_taxonomy = 'file'
model_set_type = 'file'
kwargs['taxonomy'] = 'file'
elif model_name == 'isnobal' and is_ipw:
#: ISNOBAL variable names to be looked up to make dataframes + metadata
try:
model_vars = ','.join(VARNAME_DICT[input_prefix])
        except KeyError:
model_vars = ''
if 'proc_date' in kwargs:
proc_date = kwargs['proc_date']
else:
proc_date = None
fgdc_metadata = make_fgdc_metadata(input_basename, config,
model_run_uuid, start_datetime,
end_datetime, proc_date=proc_date)
elif model_name == 'isnobal' and file_ext == 'nc':
model_vars = ','.join([','.join(v) for v in VARNAME_DICT.itervalues()])
if dt is None:
dt = pd.Timedelta('1 hour')
# calculate the "dates" fields for the watershed JSON metadata
start_dt = dt * dt_multiplier
if (start_datetime is None and end_datetime is None):
        start_datetime = datetime(water_year_start, 10, 1) + start_dt
start_datetime_str = start_datetime.strftime('%Y-%m-%d %H:%M:%S')
end_datetime = start_datetime + dt
end_datetime_str = datetime.strftime(start_datetime + dt,
'%Y-%m-%d %H:%M:%S')
elif type(start_datetime) is str and type(end_datetime) is str:
start_datetime_str = start_datetime
end_datetime_str = end_datetime
elif type(start_datetime) is datetime and type(end_datetime) is datetime:
start_datetime_str = datetime.strftime(start_datetime,
'%Y-%m-%d %H:%M:%S')
end_datetime_str = datetime.strftime(end_datetime,
'%Y-%m-%d %H:%M:%S')
else:
raise TypeError('bad start_ and/or end_datetime arguments')
# we pretty much always want to try to set these
kwargs['wms'] = 'wms'
kwargs['wcs'] = 'wcs'
if file_ext == 'nc':
kwargs['taxonomy'] = 'netcdf_isnobal'
if 'taxonomy' not in kwargs:
kwargs['taxonomy'] = 'file'
elif kwargs['taxonomy'] == '':
kwargs['taxonomy'] = 'file'
return \
make_watershed_metadata(input_basename,
config,
parent_model_run_uuid,
model_run_uuid,
model_set,
watershed_name,
state,
model_name=model_name,
model_set_type=model_set_type,
model_set_taxonomy=model_set_taxonomy,
fgdc_metadata=fgdc_metadata,
description=description,
model_vars=model_vars,
start_datetime=start_datetime_str,
end_datetime=end_datetime_str,
file_ext=file_ext,
**kwargs)
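# Illustrative call (the UUIDs, client and file are hypothetical; watershed_name
# and state must be values accepted by make_watershed_metadata below, and
# file_ext must be given explicitly for iSNOBAL files whose suffix is a numeric
# time index):
#
#     md_json = metadata_from_file(
#         'in.0010', parent_model_run_uuid, model_run_uuid,
#         description='iSNOBAL input, hour 10',
#         watershed_name='Dry Creek', state='Idaho',
#         model_name='isnobal', model_set='inputs', file_ext='bin')
#     vw_client.insert_metadata(md_json)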
def make_fgdc_metadata(file_name, config, model_run_uuid, beg_date, end_date,
**kwargs):
"""
For a single `data_file`, write the XML FGDC metadata
valid kwargs:
proc_date: date data was processed
theme_key: thematic keywords
model: scientific model, e.g., WindNinja, iSNOBAL, PRMS, etc.
researcher_name: name of researcher
mailing_address: researcher's mailing address
city: research institute city
state: research institute state
zip_code: research institute zip code
researcher_phone: researcher phone number
row_count: number of rows in dataset
column_count: number of columns in dataset
lat_res: resolution in latitude direction (meters)
lon_res: resolution in longitude direction (meters)
map_units: distance units of the map (e.g. 'm')
west_bound: westernmost longitude of bounding box
east_bound: easternmost longitude of bounding box
north_bound: northernmost latitude of bounding box
south_bound: southernmost latitude of bounding box
file_ext: extension of file used to fill out digtinfo:formname;
            if not specified, make_fgdc_metadata takes the extension of file_name
Any other kwargs will be ignored
Returns: XML FGDC metadata string
"""
try:
statinfo = os.stat(file_name)
file_size = "%s" % str(statinfo.st_size/1000000)
except OSError:
file_size = "NA"
if not config:
config = _get_config(
os.path.join(os.path.dirname(__file__), '../default.conf'))
# handle missing required fields not provided in kwargs
geoconf = config['Geo']
resconf = config['Researcher']
# if any of the bounding boxes are not given, all go to default
if not ('west_bound' in kwargs and 'east_bound' in kwargs
and 'north_bound' in kwargs and 'south_bound' in kwargs):
kwargs['west_bound'] = geoconf['default_west_bound']
kwargs['east_bound'] = geoconf['default_east_bound']
kwargs['north_bound'] = geoconf['default_north_bound']
kwargs['south_bound'] = geoconf['default_south_bound']
if not 'proc_date' in kwargs:
kwargs['proc_date'] = date.strftime(date.today(), '%Y-%m-%d')
if not 'researcher_name' in kwargs:
kwargs['researcher_name'] = resconf['researcher_name']
if not 'mailing_address' in kwargs:
kwargs['mailing_address'] = resconf['mailing_address']
if not 'city' in kwargs:
kwargs['city'] = resconf['city']
if not 'state' in kwargs:
kwargs['state'] = resconf['state']
if not 'zip_code' in kwargs:
kwargs['zip_code'] = resconf['zip_code']
if not 'researcher_phone' in kwargs:
kwargs['researcher_phone'] = resconf['phone']
if not 'researcher_email' in kwargs:
kwargs['researcher_email'] = resconf['email']
if 'file_ext' not in kwargs:
kwargs['file_ext'] = file_name.split('.')[-1]
template_env = Environment(loader=FileSystemLoader(
os.path.join(os.path.dirname(__file__), '../templates')))
template = template_env.get_template('fgdc_template.xml')
output = template.render(file_name=file_name,
file_size=file_size,
model_run_uuid=model_run_uuid,
**kwargs)
return output
def make_watershed_metadata(file_name, config, parent_model_run_uuid,
model_run_uuid, model_set, watershed_name,
state, fgdc_metadata=None, file_ext=None,
**kwargs):
""" For a single `file_name`, write the corresponding Virtual Watershed JSON
metadata.
valid kwargs:
orig_epsg: original EPSG code of projection
epsg: current EPSG code
taxonomy: likely 'file'; representation of the data
model_vars: variable(s) represented in the data
model_set: 'inputs', 'outputs', or 'reference'; AssertionError if not
model_set_type: e.g., 'binary', 'csv', 'tif', etc.
model_set_taxonomy: 'grid', 'vector', etc.
west_bound: westernmost longitude of bounding box
east_bound: easternmost longitude of bounding box
north_bound: northernmost latitude of bounding box
south_bound: southernmost latitude of bounding box
start_datetime: datetime observations began, formatted like "2010-01-01 22:00:00"
end_datetime: datetime observations ended, formatted like "2010-01-01 22:00:00"
wms: True if wms service can and should be enabled
wcs: True if wcs service can and should be enabled
watershed_name: Name of watershed, e.g. 'Dry Creek' or 'Lehman Creek'
        model_name: Name of model, if applicable; e.g. 'iSNOBAL', 'PRMS'
mimetype: defaults to application/octet-stream
file_ext: extension to be associated with the dataset; make_watershed_metadata
will take the extension of file_name if not given explicitly
fgdc_metadata: FGDC md probably created by make_fgdc_metadata; if not
given, a default version is created (see source for details)
Returns: JSON metadata string
"""
assert model_set in ["inputs", "outputs", "reference"], \
"parameter model_set must be either 'inputs' or 'outputs', not %s" \
% model_set
# TODO get valid_states and valid_watersheds from VW w/ TODO VWClient method
valid_states = ['Idaho', 'Nevada', 'New Mexico']
assert state in valid_states, "state passed was " + state + \
". Must be one of " + ", ".join(valid_states)
valid_watersheds = ['Dry Creek', 'Valles Caldera', 'Jemez Caldera',
'Lehman Creek', 'Reynolds Creek']
assert watershed_name in valid_watersheds, "watershed passed was " + \
watershed_name + ". Must be one of " + ", ".join(valid_watersheds)
# logic to figure out mimetype and such based on extension
if not file_ext:
file_ext = file_name.split('.')[-1]
# check that the file extension is not a digit as might happen for isnobal
if file_ext.isdigit():
raise ValueError("The extension is a digit. You must explicitly"
+ " provide the file extension with keyword 'file_ext'")
if file_ext == 'tif':
if 'wcs' not in kwargs:
kwargs['wcs'] = True
if 'wms' not in kwargs:
kwargs['wms'] = True
if 'tax' not in kwargs:
kwargs['tax'] = 'geoimage'
if 'mimetype' not in kwargs:
kwargs['mimetype'] = 'application/x-zip-compressed'
if 'model_set_type' not in kwargs:
kwargs['model_set_type'] = 'vis'
if 'mimetype' not in kwargs:
kwargs['mimetype'] = 'application/octet-stream'
# If one of the datetimes is missing
if not ('start_datetime' in kwargs and 'end_datetime' in kwargs):
kwargs['start_datetime'] = "1970-10-01 00:00:00"
kwargs['end_datetime'] = "1970-10-01 01:00:00"
if not fgdc_metadata:
fgdc_kwargs = {k: v for k,v in kwargs.iteritems()
if k not in ['start_datetime', 'end_datetime']}
# can just include all remaining kwargs; no problem if they go unused
fgdc_metadata = make_fgdc_metadata(file_name, config, model_run_uuid,
kwargs['start_datetime'],
kwargs['end_datetime'],
**fgdc_kwargs)
basename = os.path.basename(file_name)
geo_config = config['Geo']
firstTwoUUID = model_run_uuid[:2]
input_file_path = os.path.join("/geodata/watershed-data",
firstTwoUUID,
model_run_uuid,
os.path.basename(file_name))
# properly escape xml metadata escape chars
fgdc_metadata = \
fgdc_metadata.replace('\n', '\\n').replace('\t', '\\t')
geoconf = config['Geo']
resconf = config['Researcher']
# if any of the bounding boxes are not given, all go to default
if not ('west_bound' in kwargs and 'east_bound' in kwargs
and 'north_bound' in kwargs and 'south_bound' in kwargs):
kwargs['west_bound'] = geoconf['default_west_bound']
kwargs['east_bound'] = geoconf['default_east_bound']
kwargs['north_bound'] = geoconf['default_north_bound']
kwargs['south_bound'] = geoconf['default_south_bound']
    # write the metadata for a file; the template rendered below is determined
    # by file_ext, set within this function
template_env = Environment(loader=FileSystemLoader(
os.path.join(os.path.dirname(__file__),
'../templates')))
template = template_env.get_template('watershed_template.json')
if 'wcs' in kwargs and kwargs['wcs']:
wcs_str = 'wcs'
else:
wcs_str = None
if 'wms' in kwargs and kwargs['wms']:
wms_str = 'wms'
else:
wms_str = None
if kwargs['model_name'] == 'isnobal' and file_ext != 'tif':
basename = basename
else:
basename = os.path.splitext(basename)[0]
output = template.render(basename=basename,
parent_model_run_uuid=parent_model_run_uuid,
model_run_uuid=model_run_uuid,
model_set=model_set,
watershed_name=watershed_name,
state=state,
wcs_str=wcs_str,
wms_str=wms_str,
input_file_path=input_file_path,
fgdc_metadata=fgdc_metadata,
file_ext=file_ext,
**kwargs) + '\n'
return output
|
bsd-2-clause
|
lethargi/paparazzi
|
sw/misc/attitude_reference/test_att_ref.py
|
49
|
3485
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Antoine Drouin
#
# This file is part of paparazzi.
#
# paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import math
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
import pat.utils as pu
import pat.algebra as pa
import control as ctl
def random_setpoint(time, dt_step=2):
tf = time[0]
sp = np.zeros((len(time), 3))
sp_i = [0, 0, 0]
for i in range(0, len(time)):
if time[i] >= tf:
            ui = np.random.rand(3) - [0.5, 0.5, 0.5]
ai = np.random.rand(1)
n = np.linalg.norm(ui)
if n > 0:
ui /= n
sp_i = pa.euler_of_quat(pa.quat_of_axis_angle(ui, ai))
tf += dt_step
sp[i] = sp_i
return sp
def test_ref(r, time, setpoint):
ref = np.zeros((len(time), 9))
for i in range(1, time.size):
sp_quat = pa.quat_of_euler(setpoint[i])
r.update_quat(sp_quat, time[i] - time[i - 1])
euler = pa.euler_of_quat(r.quat)
ref[i] = np.concatenate((euler, r.vel, r.accel))
return ref
def plot_ref(time, xref=None, sp=None, figure=None):
margins = (0.05, 0.05, 0.98, 0.96, 0.20, 0.34)
figure = pu.prepare_fig(figure, window_title='Reference', figsize=(20.48, 10.24), margins=margins)
plots = [("$\phi$", "deg"), ("$\\theta$", "deg"), ("$\\psi$", "deg"),
("$p$", "deg/s"), ("$q$", "deg/s"), ("$r$", "deg/s"),
("$\dot{p}$", "deg/s2"), ("$\dot{q}$", "deg/s2"), ("$\dot{r}$", "deg/s2")]
for i, (title, ylab) in enumerate(plots):
ax = plt.subplot(3, 3, i + 1)
if xref is not None: plt.plot(time, pu.deg_of_rad(xref[:, i]))
pu.decorate(ax, title=title, ylab=ylab)
if sp is not None and i < 3:
plt.plot(time, pu.deg_of_rad(sp[:, i]))
return figure
dt = 1. / 512.
time = np.arange(0., 4, dt)
sp = np.zeros((len(time), 3))
sp[:, 0] = pu.rad_of_deg(45.) * scipy.signal.square(math.pi / 2 * time + math.pi)
# sp[:, 1] = pu.rad_of_deg(5.)*scipy.signal.square(math.pi/2*time)
# sp[:, 2] = pu.rad_of_deg(45.)
# sp = random_setpoint(time)
# rs = [ctl.att_ref_analytic_disc(axis=0), ctl.att_ref_analytic_cont(axis=0), ctl.att_ref_default()]
args = {'omega': 10., 'xi': 0.7, 'sat_vel': pu.rad_of_deg(150.), 'sat_accel': pu.rad_of_deg(1800),
'sat_jerk': pu.rad_of_deg(27000)}
rs = [ctl.att_ref_sat_naive(**args), ctl.att_ref_sat_nested(**args), ctl.att_ref_sat_nested2(**args)]
# rs.append(ctl.AttRefIntNative(**args))
rs.append(ctl.AttRefFloatNative(**args))
xrs = [test_ref(r, time, sp) for r in rs]
figure = None
for xr in xrs:
figure = plot_ref(time, xr, None, figure)
figure = plot_ref(time, None, sp, figure)
legends = [r.name for r in rs] + ['Setpoint']
plt.subplot(3, 3, 3)
plt.legend(legends)
plt.show()
|
gpl-2.0
|
nrhine1/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
98
|
20870
|
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general; it is just a
    # property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
    # This test is basically a copy of the one above, with the positive option
    # added. However, for the middle part (the comparison of coefficient values
    # over a range of alphas) we had to make an adaptation. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
    # by Efron et al 2004. The coefficients are typically in agreement up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
|
bsd-3-clause
|
mhdella/scikit-learn
|
examples/linear_model/plot_iris_logistic.py
|
283
|
1678
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
bmazin/SDR
|
Setup/DetectorAnalysis/QvsNN.py
|
1
|
1282
|
#!/usr/bin/python
import numpy as np
from matplotlib import pyplot as plt
# Plots histograms of f, Q, and the distance of f to its nearest neighbor, plus Q vs f and distance-to-neighbor vs f, and saves the result to a PDF. You need to change File and pdftitle (and possibly the text position around line 79).
File= '20121030/FL-sci4a-all-fits.txt'
pdftitle='/home/sean/data/fitshist/20121030-SCI4a-DF-all-QvsNN.pdf'
autofit=np.loadtxt('/home/sean/data/%s'%File)
freqs=autofit[:,1]
Qs=autofit[:,2]
Qs=[x/1000 for x in Qs]
ds=[]
fs=[]
freq=sorted(freqs)
for i in xrange(len(freqs)):
if i-1 >= 0:
y=abs(freq[i]-freq[i-1])
else:
y=abs(freq[i]-freq[i+1])
if i+1 < len(freqs):
x=abs(freq[i]-freq[i+1])
else:
x=abs(freq[i]-freq[i-1])
if x>=y:
ds.append(y)
else:
ds.append(x)
fs.append(freq[i])
ds=[x*1000 for x in ds]
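# Equivalent vectorized computation of the nearest-neighbor distances above
# (sketch only, kept as a comment so the script's behaviour is unchanged):
#
#     freq_arr = np.sort(freqs)
#     gaps = np.diff(freq_arr)                  # spacing to the next resonator
#     nn = np.minimum(np.r_[gaps[0], gaps],     # distance to previous neighbor
#                     np.r_[gaps, gaps[-1]])    # distance to next neighbor
#     ds = list(nn * 1000)                      # same scaling as the loop above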
mf=np.median(freqs)
sf=np.std(freqs)
mq=np.median(Qs)
sq=np.std(Qs)
md=np.median(ds)
sd=np.std(ds)
nres=len(freqs)
fig = plt.figure()
ax5=fig.add_subplot(111)
ax5.plot(ds,Qs,'b,')
ax5.set_ylabel('Q(k)', size=8)
ax5.set_xlabel('Distance of f to Nearest Neighbor (MHz)', size=8)
ax5.set_title('Nearest Neighbor vs f0', size=9)
ax5.tick_params(labelsize=8)
#ax5.set_ylim(0,20)
fig.savefig(pdftitle)
plt.show()
plt.close()
|
gpl-2.0
|
jmontoyam/mne-python
|
mne/tests/test_bem.py
|
8
|
16018
|
# Authors: Marijn van Vliet <[email protected]>
#
# License: BSD 3 clause
from copy import deepcopy
from os import remove
import os.path as op
from shutil import copy
import warnings
import numpy as np
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_equal, assert_allclose
from mne import (make_bem_model, read_bem_surfaces, write_bem_surfaces,
make_bem_solution, read_bem_solution, write_bem_solution,
make_sphere_model, Transform, Info)
from mne.preprocessing.maxfilter import fit_sphere_to_headshape
from mne.io.constants import FIFF
from mne.transforms import translation
from mne.datasets import testing
from mne.utils import (run_tests_if_main, _TempDir, slow_test, catch_logging,
requires_freesurfer)
from mne.bem import (_ico_downsample, _get_ico_map, _order_surfaces,
_assert_complete_surface, _assert_inside,
_check_surface_size, _bem_find_surface, make_flash_bem)
from mne.surface import read_surface
from mne.io import read_info
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always')
fname_raw = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
'test_raw.fif')
subjects_dir = op.join(testing.data_path(download=False), 'subjects')
fname_bem_3 = op.join(subjects_dir, 'sample', 'bem',
'sample-320-320-320-bem.fif')
fname_bem_1 = op.join(subjects_dir, 'sample', 'bem',
'sample-320-bem.fif')
fname_bem_sol_3 = op.join(subjects_dir, 'sample', 'bem',
'sample-320-320-320-bem-sol.fif')
fname_bem_sol_1 = op.join(subjects_dir, 'sample', 'bem',
'sample-320-bem-sol.fif')
def _compare_bem_surfaces(surfs_1, surfs_2):
"""Helper to compare BEM surfaces"""
names = ['id', 'nn', 'rr', 'coord_frame', 'tris', 'sigma', 'ntri', 'np']
ignores = ['tri_cent', 'tri_nn', 'tri_area', 'neighbor_tri']
for s0, s1 in zip(surfs_1, surfs_2):
assert_equal(set(names), set(s0.keys()) - set(ignores))
assert_equal(set(names), set(s1.keys()) - set(ignores))
for name in names:
assert_allclose(s0[name], s1[name], rtol=1e-3, atol=1e-6,
err_msg='Mismatch: "%s"' % name)
def _compare_bem_solutions(sol_a, sol_b):
"""Helper to compare BEM solutions"""
# compare the surfaces we used
_compare_bem_surfaces(sol_a['surfs'], sol_b['surfs'])
# compare the actual solutions
names = ['bem_method', 'field_mult', 'gamma', 'is_sphere',
'nsol', 'sigma', 'source_mult', 'solution']
assert_equal(set(sol_a.keys()), set(sol_b.keys()))
assert_equal(set(names + ['surfs']), set(sol_b.keys()))
for key in names:
assert_allclose(sol_a[key], sol_b[key], rtol=1e-3, atol=1e-5,
err_msg='Mismatch: %s' % key)
@testing.requires_testing_data
def test_io_bem():
"""Test reading and writing of bem surfaces and solutions"""
tempdir = _TempDir()
temp_bem = op.join(tempdir, 'temp-bem.fif')
assert_raises(ValueError, read_bem_surfaces, fname_raw)
assert_raises(ValueError, read_bem_surfaces, fname_bem_3, s_id=10)
surf = read_bem_surfaces(fname_bem_3, patch_stats=True)
surf = read_bem_surfaces(fname_bem_3, patch_stats=False)
write_bem_surfaces(temp_bem, surf[0])
surf_read = read_bem_surfaces(temp_bem, patch_stats=False)
_compare_bem_surfaces(surf, surf_read)
assert_raises(RuntimeError, read_bem_solution, fname_bem_3)
temp_sol = op.join(tempdir, 'temp-sol.fif')
sol = read_bem_solution(fname_bem_sol_3)
assert_true('BEM' in repr(sol))
write_bem_solution(temp_sol, sol)
sol_read = read_bem_solution(temp_sol)
_compare_bem_solutions(sol, sol_read)
sol = read_bem_solution(fname_bem_sol_1)
assert_raises(RuntimeError, _bem_find_surface, sol, 3)
def test_make_sphere_model():
"""Test making a sphere model"""
info = read_info(fname_raw)
assert_raises(ValueError, make_sphere_model, 'foo', 'auto', info)
assert_raises(ValueError, make_sphere_model, 'auto', 'auto', None)
assert_raises(ValueError, make_sphere_model, 'auto', 'auto', info,
relative_radii=(), sigmas=())
assert_raises(ValueError, make_sphere_model, 'auto', 'auto', info,
relative_radii=(1,)) # wrong number of radii
# here we just make sure it works -- the functionality is actually
# tested more extensively e.g. in the forward and dipole code
bem = make_sphere_model('auto', 'auto', info)
assert_true('3 layers' in repr(bem))
assert_true('Sphere ' in repr(bem))
assert_true(' mm' in repr(bem))
bem = make_sphere_model('auto', None, info)
assert_true('no layers' in repr(bem))
assert_true('Sphere ' in repr(bem))
@testing.requires_testing_data
def test_bem_model():
"""Test BEM model creation from Python with I/O"""
tempdir = _TempDir()
fname_temp = op.join(tempdir, 'temp-bem.fif')
for kwargs, fname in zip((dict(), dict(conductivity=[0.3])),
[fname_bem_3, fname_bem_1]):
model = make_bem_model('sample', ico=2, subjects_dir=subjects_dir,
**kwargs)
model_c = read_bem_surfaces(fname)
_compare_bem_surfaces(model, model_c)
write_bem_surfaces(fname_temp, model)
model_read = read_bem_surfaces(fname_temp)
_compare_bem_surfaces(model, model_c)
_compare_bem_surfaces(model_read, model_c)
assert_raises(ValueError, make_bem_model, 'sample', # bad conductivity
conductivity=[0.3, 0.006], subjects_dir=subjects_dir)
@slow_test
@testing.requires_testing_data
def test_bem_solution():
"""Test making a BEM solution from Python with I/O"""
# test degenerate conditions
surf = read_bem_surfaces(fname_bem_1)[0]
assert_raises(RuntimeError, _ico_downsample, surf, 10) # bad dec grade
s_bad = dict(tris=surf['tris'][1:], ntri=surf['ntri'] - 1, rr=surf['rr'])
assert_raises(RuntimeError, _ico_downsample, s_bad, 1) # not isomorphic
s_bad = dict(tris=surf['tris'].copy(), ntri=surf['ntri'],
rr=surf['rr']) # bad triangulation
s_bad['tris'][0] = [0, 0, 0]
assert_raises(RuntimeError, _ico_downsample, s_bad, 1)
s_bad['id'] = 1
assert_raises(RuntimeError, _assert_complete_surface, s_bad)
s_bad = dict(tris=surf['tris'], ntri=surf['ntri'], rr=surf['rr'].copy())
s_bad['rr'][0] = 0.
assert_raises(RuntimeError, _get_ico_map, surf, s_bad)
surfs = read_bem_surfaces(fname_bem_3)
assert_raises(RuntimeError, _assert_inside, surfs[0], surfs[1]) # outside
surfs[0]['id'] = 100 # bad surfs
assert_raises(RuntimeError, _order_surfaces, surfs)
surfs[1]['rr'] /= 1000.
assert_raises(RuntimeError, _check_surface_size, surfs[1])
# actually test functionality
tempdir = _TempDir()
fname_temp = op.join(tempdir, 'temp-bem-sol.fif')
# use a model and solution made in Python
conductivities = [(0.3,), (0.3, 0.006, 0.3)]
fnames = [fname_bem_sol_1, fname_bem_sol_3]
for cond, fname in zip(conductivities, fnames):
for model_type in ('python', 'c'):
if model_type == 'python':
model = make_bem_model('sample', conductivity=cond, ico=2,
subjects_dir=subjects_dir)
else:
model = fname_bem_1 if len(cond) == 1 else fname_bem_3
solution = make_bem_solution(model)
solution_c = read_bem_solution(fname)
_compare_bem_solutions(solution, solution_c)
write_bem_solution(fname_temp, solution)
solution_read = read_bem_solution(fname_temp)
_compare_bem_solutions(solution, solution_c)
_compare_bem_solutions(solution_read, solution_c)
def test_fit_sphere_to_headshape():
"""Test fitting a sphere to digitization points"""
# Create points of various kinds
rad = 0.09
big_rad = 0.12
center = np.array([0.0005, -0.01, 0.04])
dev_trans = np.array([0., -0.005, -0.01])
dev_center = center - dev_trans
dig = [
# Left auricular
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'ident': FIFF.FIFFV_POINT_LPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'r': np.array([-1.0, 0.0, 0.0])},
# Nasion
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'ident': FIFF.FIFFV_POINT_NASION,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'r': np.array([0.0, 1.0, 0.0])},
# Right auricular
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'ident': FIFF.FIFFV_POINT_RPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'r': np.array([1.0, 0.0, 0.0])},
# Top of the head (extra point)
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'kind': FIFF.FIFFV_POINT_EXTRA,
'r': np.array([0.0, 0.0, 1.0])},
# EEG points
# Fz
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'kind': FIFF.FIFFV_POINT_EEG,
'r': np.array([0, .72, .69])},
# F3
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'kind': FIFF.FIFFV_POINT_EEG,
'r': np.array([-.55, .67, .50])},
# F4
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'kind': FIFF.FIFFV_POINT_EEG,
'r': np.array([.55, .67, .50])},
# Cz
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'kind': FIFF.FIFFV_POINT_EEG,
'r': np.array([0.0, 0.0, 1.0])},
# Pz
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'kind': FIFF.FIFFV_POINT_EEG,
'r': np.array([0, -.72, .69])},
]
for d in dig:
d['r'] *= rad
d['r'] += center
    # Device to head transformation (translation only; no rotation is applied)
dev_head_t = Transform('meg', 'head', translation(*(dev_trans)))
info = Info(dig=dig, dev_head_t=dev_head_t)
# Degenerate conditions
assert_raises(ValueError, fit_sphere_to_headshape, info,
dig_kinds=(FIFF.FIFFV_POINT_HPI,))
assert_raises(ValueError, fit_sphere_to_headshape, info,
dig_kinds='foo', units='m')
info['dig'][0]['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
assert_raises(RuntimeError, fit_sphere_to_headshape, info, units='m')
info['dig'][0]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
    # Test with 4 points that match a perfect sphere
dig_kinds = (FIFF.FIFFV_POINT_CARDINAL, FIFF.FIFFV_POINT_EXTRA)
with warnings.catch_warnings(record=True): # not enough points
r, oh, od = fit_sphere_to_headshape(info, dig_kinds=dig_kinds,
units='m')
kwargs = dict(rtol=1e-3, atol=1e-5)
assert_allclose(r, rad, **kwargs)
assert_allclose(oh, center, **kwargs)
assert_allclose(od, dev_center, **kwargs)
# Test with all points
dig_kinds = ('cardinal', FIFF.FIFFV_POINT_EXTRA, 'eeg')
kwargs = dict(rtol=1e-3, atol=1e-3)
with warnings.catch_warnings(record=True): # not enough points
r, oh, od = fit_sphere_to_headshape(info, dig_kinds=dig_kinds,
units='m')
assert_allclose(r, rad, **kwargs)
assert_allclose(oh, center, **kwargs)
assert_allclose(od, dev_center, **kwargs)
# Test with some noisy EEG points only.
dig_kinds = 'eeg'
with warnings.catch_warnings(record=True): # not enough points
r, oh, od = fit_sphere_to_headshape(info, dig_kinds=dig_kinds,
units='m')
kwargs = dict(rtol=1e-3, atol=1e-2)
assert_allclose(r, rad, **kwargs)
assert_allclose(oh, center, **kwargs)
assert_allclose(od, center, **kwargs)
# Test big size
dig_kinds = ('cardinal', 'extra')
info_big = deepcopy(info)
for d in info_big['dig']:
d['r'] -= center
d['r'] *= big_rad / rad
d['r'] += center
with warnings.catch_warnings(record=True): # fit
with catch_logging() as log_file:
r, oh, od = fit_sphere_to_headshape(info_big, dig_kinds=dig_kinds,
verbose='warning', units='mm')
log_file = log_file.getvalue().strip()
assert_equal(len(log_file.split('\n')), 2)
assert_true('Estimated head size' in log_file)
assert_allclose(oh, center * 1000, atol=1e-3)
assert_allclose(r, big_rad * 1000, atol=1e-3)
del info_big
# Test offcenter
dig_kinds = ('cardinal', 'extra')
info_shift = deepcopy(info)
shift_center = np.array([0., -0.03, 0.])
for d in info_shift['dig']:
d['r'] -= center
d['r'] += shift_center
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
r, oh, od = fit_sphere_to_headshape(
info_shift, dig_kinds=dig_kinds, verbose='warning', units='m')
log_file = log_file.getvalue().strip()
assert_equal(len(log_file.split('\n')), 2)
assert_true('from head frame origin' in log_file)
assert_allclose(oh, shift_center, atol=1e-6)
assert_allclose(r, rad, atol=1e-6)
# Test "auto" mode (default)
# Should try "extra", fail, and go on to EEG
with warnings.catch_warnings(record=True): # not enough points
r, oh, od = fit_sphere_to_headshape(info, units='m')
kwargs = dict(rtol=1e-3, atol=1e-3)
assert_allclose(r, rad, **kwargs)
assert_allclose(oh, center, **kwargs)
assert_allclose(od, dev_center, **kwargs)
with warnings.catch_warnings(record=True): # not enough points
r2, oh2, od2 = fit_sphere_to_headshape(info, units='m')
assert_allclose(r, r2, atol=1e-7)
assert_allclose(oh, oh2, atol=1e-7)
assert_allclose(od, od2, atol=1e-7)
# this one should pass, 1 EXTRA point and 3 EEG (but the fit is terrible)
info = Info(dig=dig[:7], dev_head_t=dev_head_t)
with warnings.catch_warnings(record=True): # bad fit
r, oh, od = fit_sphere_to_headshape(info, units='m')
    # this one should fail: 1 EXTRA point and only 2 EEG points is not enough
info = Info(dig=dig[:6], dev_head_t=dev_head_t)
assert_raises(ValueError, fit_sphere_to_headshape, info, units='m')
assert_raises(TypeError, fit_sphere_to_headshape, 1, units='m')
@requires_freesurfer
@testing.requires_testing_data
def test_make_flash_bem():
"""Test computing bem from flash images."""
import matplotlib.pyplot as plt
tmp = _TempDir()
bemdir = op.join(subjects_dir, 'sample', 'bem')
flash_path = op.join(subjects_dir, 'sample', 'mri', 'flash')
for surf in ('inner_skull', 'outer_skull', 'outer_skin'):
copy(op.join(bemdir, surf + '.surf'), tmp)
copy(op.join(bemdir, surf + '.tri'), tmp)
copy(op.join(bemdir, 'inner_skull_tmp.tri'), tmp)
copy(op.join(bemdir, 'outer_skin_from_testing.surf'), tmp)
# This function deletes the tri files at the end.
try:
make_flash_bem('sample', overwrite=True, subjects_dir=subjects_dir,
flash_path=flash_path)
for surf in ('inner_skull', 'outer_skull', 'outer_skin'):
coords, faces = read_surface(op.join(bemdir, surf + '.surf'))
surf = 'outer_skin_from_testing' if surf == 'outer_skin' else surf
coords_c, faces_c = read_surface(op.join(tmp, surf + '.surf'))
assert_equal(0, faces.min())
assert_equal(coords.shape[0], faces.max() + 1)
assert_allclose(coords, coords_c)
assert_allclose(faces, faces_c)
finally:
for surf in ('inner_skull', 'outer_skull', 'outer_skin'):
remove(op.join(bemdir, surf + '.surf')) # delete symlinks
copy(op.join(tmp, surf + '.tri'), bemdir) # return deleted tri
copy(op.join(tmp, surf + '.surf'), bemdir) # return moved surf
copy(op.join(tmp, 'inner_skull_tmp.tri'), bemdir)
copy(op.join(tmp, 'outer_skin_from_testing.surf'), bemdir)
plt.close('all')
run_tests_if_main()
|
bsd-3-clause
|
ducnguyen1911/kaggle_criteo_ads
|
stats.py
|
2
|
6460
|
import pandas as pd
import numpy as np
import sys
import gc
def compute_integer_stats(input_file, chunk_size):
"""
Compute training data statistics
Integer Variables: Min, Max, Mean
Click: Click, Not Click
:return:
"""
stats_integer = pd.DataFrame()
clicks = 0
impressions = 0
reader = pd.read_csv(input_file, chunksize=chunk_size)
count = 0
for chunk in reader:
print 'Reading line:' + str(count * chunk_size)
chunk_integer = chunk.iloc[:, 2:15]
if count == 0:
stats_integer['max'] = chunk_integer.max()
stats_integer['min'] = chunk_integer.min()
stats_integer['sum'] = chunk_integer.sum()
stats_integer['count'] = chunk_integer.count()
else:
stats_integer['max_chunk'] = chunk_integer.max()
stats_integer['min_chunk'] = chunk_integer.min()
stats_integer['sum_chunk'] = chunk_integer.sum()
stats_integer['count_chunk'] = chunk_integer.count()
stats_integer['max'] = stats_integer[['max', 'max_chunk']].max(axis=1)
            stats_integer['min'] = stats_integer[['min', 'min_chunk']].min(axis=1)
stats_integer['sum'] = stats_integer[['sum', 'sum_chunk']].sum(axis=1)
stats_integer['count'] = stats_integer[['count', 'count_chunk']].sum(axis=1)
stats_integer.drop(['max_chunk', 'min_chunk', 'sum_chunk', 'count_chunk'], axis=1, inplace=True)
clicks += chunk['Label'].sum()
impressions += chunk.shape[0]
count += 1
stats_integer['mean'] = stats_integer['sum'] / stats_integer['count']
reader = pd.read_csv(input_file, chunksize=chunk_size)
count = 0
for chunk in reader:
print 'Reading line:' + str(count * chunk_size)
chunk_integer = chunk.iloc[:, 2:15]
if count == 0:
stats_integer['sq_sum'] = ((chunk_integer - stats_integer['mean']) ** 2).sum()
else:
stats_integer['sq_sum_chunk'] = ((chunk_integer - stats_integer['mean']) ** 2).sum()
stats_integer['sq_sum'] = stats_integer[['sq_sum', 'sq_sum_chunk']].sum(axis=1)
stats_integer.drop(['sq_sum_chunk'], axis=1, inplace=True)
count += 1
stats_integer['std'] = (stats_integer['sq_sum'] / (stats_integer['count'] - 1)).apply(np.sqrt)
stats_integer.drop(['sq_sum'], axis=1, inplace=True)
print stats_integer
stats_integer.to_csv('data/integer_stats.csv')
print "Total Clicks:" + str(clicks) + " Total Impressions:" + str(impressions)
def compute_category_stats(input_file, category_label, chunk_size):
"""
Compute training data statistics
Categorical Variables: Number of categories, Histogram
Integer Variables: Min, Max, Mean
Click: Click, Not Click
:return:
"""
stats_category = pd.DataFrame()
reader = pd.read_csv(input_file, chunksize=chunk_size)
count = 0
for chunk in reader:
print 'Reading line:' + str(count * chunk_size)
chunk_category = chunk.iloc[:, 15:]
frame = pd.DataFrame()
frame['category'] = chunk_category.groupby(category_label).size().index
frame['count'] = chunk_category.groupby(category_label).size().values
stats_category = pd.concat([stats_category, frame])
# Aggregate on common category values
frame = pd.DataFrame()
frame['category'] = stats_category.groupby('category').sum().index
frame['count'] = stats_category.groupby("category").sum().values
stats_category = frame
# Force garbage collection
gc.collect()
count += 1
return stats_category.describe()
def compute_category_stats_all(input_file, chunk_size):
"""
Compute training data statistics
Categorical Variables: Number of categories, Histogram
Integer Variables: Min, Max, Mean
Click: Click, Not Click
:return:
"""
stats_category = {}
for i in range(1, 27):
stats_category['C' + str(i)] = pd.DataFrame()
reader = pd.read_csv(input_file, chunksize=chunk_size)
count = 0
for chunk in reader:
print 'Reading line:' + str(count * chunk_size)
chunk_category = chunk.iloc[:, 15:]
for i in range(1, 27):
category_label = 'C' + str(i)
frame = pd.DataFrame()
frame['category'] = chunk_category.groupby(category_label).size().index
frame['count'] = chunk_category.groupby(category_label).size().values
stats_category[category_label] = pd.concat([stats_category[category_label], frame])
# Aggregate on common category values
frame = pd.DataFrame()
frame['category'] = stats_category[category_label].groupby('category').sum().index
frame['count'] = stats_category[category_label].groupby("category").sum().values
stats_category[category_label] = frame
gc.collect()
count += 1
stats_category_agg = pd.DataFrame()
for i in range(1, 27):
frame = stats_category['C' + str(i)].groupby('category').sum().describe().transpose()
frame.reset_index()
frame.index = ['C' + str(i)]
stats_category_agg = pd.concat([stats_category_agg, frame])
print stats_category_agg
stats_category_agg.to_csv('data/category_stats.csv')
def main():
if len(sys.argv) == 4:
action_type = sys.argv[1]
if action_type == 'integer':
chunk_size = int(sys.argv[3])
input_file = sys.argv[2]
compute_integer_stats(input_file, chunk_size)
elif action_type == 'category':
input_file = sys.argv[2]
chunk_size = int(sys.argv[3])
# stats_category = pd.DataFrame()
# for i in range(1, 27):
# category_label = 'C' + str(i)
# print "Starting analysis for category: " + category_label
# frame = compute_category_stats(input_file, category_label, chunk_size).transpose()
# frame.reset_index()
# frame.index = [category_label]
# stats_category = pd.concat([stats_category, frame])
# print stats_category
compute_category_stats_all(input_file, chunk_size)
else:
print "Usage {0} <feature-type> <input-file> <chunk-size>".format(sys.argv[0])
print "<feature-type> is integer or category"
main()
|
mit
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/pandas/tests/io/parser/usecols.py
|
3
|
16067
|
# -*- coding: utf-8 -*-
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas._libs.lib import Timestamp
from pandas.compat import StringIO
class UsecolsTests(object):
def test_raise_on_mixed_dtype_usecols(self):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
msg = ("'usecols' must either be all strings, all unicode, "
"all integers or a callable")
usecols = [0, 'b', 2]
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), usecols=usecols)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
assert len(result.columns) == 2
assert (result['b'] == exp['b']).all()
assert (result['c'] == exp['c']).all()
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# see gh-5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
pytest.raises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_usecols_index_col_False(self):
# see gh-9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_usecols_index_col_conflict(self):
# see gh-4201: test that index_col as integer reflects usecols
data = 'a,b,c,d\nA,a,1,one\nB,b,2,two'
expected = DataFrame({'c': [1, 2]}, index=Index(
['a', 'b'], name='b'))
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'b': ['a', 'b'], 'c': [1, 2], 'd': ('one', 'two')})
expected = expected.set_index(['b', 'c'])
df = self.read_csv(StringIO(data), usecols=['b', 'c', 'd'],
index_col=['b', 'c'])
tm.assert_frame_equal(expected, df)
def test_usecols_implicit_index_col(self):
# see gh-2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# see gh-2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep=r'\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv(StringIO(data), delim_whitespace=True,
usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_integer_like_header(self):
data = """2,0,1
1000,2000,3000
4000,5000,6000
"""
usecols = [0, 1] # column selection by index
expected = DataFrame(data=[[1000, 2000],
[4000, 5000]],
columns=['2', '0'])
df = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['0', '1'] # column selection by name
expected = DataFrame(data=[[2000, 3000],
[5000, 6000]],
columns=['0', '1'])
df = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates(self):
# See gh-9755
s = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), usecols=[0, 2, 3],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(s), usecols=[3, 0, 2],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
# See gh-13604
s = """2008-02-07 09:40,1032.43
2008-02-07 09:50,1042.54
2008-02-07 10:00,1051.65
"""
parse_dates = [0]
names = ['date', 'values']
usecols = names[:]
index = Index([Timestamp('2008-02-07 09:40'),
Timestamp('2008-02-07 09:50'),
Timestamp('2008-02-07 10:00')],
name='date')
cols = {'values': [1032.43, 1042.54, 1051.65]}
expected = DataFrame(cols, index=index)
df = self.read_csv(StringIO(s), parse_dates=parse_dates, index_col=0,
usecols=usecols, header=None, names=names)
tm.assert_frame_equal(df, expected)
# See gh-14792
s = """a,b,c,d,e,f,g,h,i,j
2016/09/21,1,1,2,3,4,5,6,7,8"""
parse_dates = [0]
usecols = list('abcdefghij')
cols = {'a': Timestamp('2016-09-21'),
'b': [1], 'c': [1], 'd': [2],
'e': [3], 'f': [4], 'g': [5],
'h': [6], 'i': [7], 'j': [8]}
expected = DataFrame(cols, columns=usecols)
df = self.read_csv(StringIO(s), usecols=usecols,
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
s = """a,b,c,d,e,f,g,h,i,j\n2016/09/21,1,1,2,3,4,5,6,7,8"""
parse_dates = [[0, 1]]
usecols = list('abcdefghij')
cols = {'a_b': '2016/09/21 1',
'c': [1], 'd': [2], 'e': [3], 'f': [4],
'g': [5], 'h': [6], 'i': [7], 'j': [8]}
expected = DataFrame(cols, columns=['a_b'] + list('cdefghij'))
df = self.read_csv(StringIO(s), usecols=usecols,
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates_and_full_names(self):
# See gh-9755
s = """0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
names = list('abcde')
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), names=names,
usecols=[0, 2, 3],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(s), names=names,
usecols=[3, 0, 2],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates_and_usecol_names(self):
# See gh-9755
s = """0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
names = list('acd')
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), names=names,
usecols=[0, 2, 3],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(s), names=names,
usecols=[3, 0, 2],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
def test_usecols_with_unicode_strings(self):
# see gh-13219
s = '''AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'AAA': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'BBB': {0: 8, 1: 2, 2: 7}
}
expected = DataFrame(data)
df = self.read_csv(StringIO(s), usecols=[u'AAA', u'BBB'])
tm.assert_frame_equal(df, expected)
def test_usecols_with_single_byte_unicode_strings(self):
# see gh-13219
s = '''A,B,C,D
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'A': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'B': {0: 8, 1: 2, 2: 7}
}
expected = DataFrame(data)
df = self.read_csv(StringIO(s), usecols=[u'A', u'B'])
tm.assert_frame_equal(df, expected)
def test_usecols_with_mixed_encoding_strings(self):
s = '''AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
msg = ("'usecols' must either be all strings, all unicode, "
"all integers or a callable")
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(s), usecols=[u'AAA', b'BBB'])
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(s), usecols=[b'AAA', u'BBB'])
def test_usecols_with_multibyte_characters(self):
s = '''あああ,いい,ううう,ええええ
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'あああ': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'いい': {0: 8, 1: 2, 2: 7}
}
expected = DataFrame(data)
df = self.read_csv(StringIO(s), usecols=['あああ', 'いい'])
tm.assert_frame_equal(df, expected)
def test_usecols_with_multibyte_unicode_characters(self):
pytest.skip('TODO: see gh-13253')
s = '''あああ,いい,ううう,ええええ
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'あああ': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'いい': {0: 8, 1: 2, 2: 7}
}
expected = DataFrame(data)
df = self.read_csv(StringIO(s), usecols=[u'あああ', u'いい'])
tm.assert_frame_equal(df, expected)
def test_empty_usecols(self):
# should not raise
data = 'a,b,c\n1,2,3\n4,5,6'
expected = DataFrame()
result = self.read_csv(StringIO(data), usecols=set([]))
tm.assert_frame_equal(result, expected)
def test_np_array_usecols(self):
# See gh-12546
data = 'a,b,c\n1,2,3'
usecols = np.array(['a', 'b'])
expected = DataFrame([[1, 2]], columns=usecols)
result = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
def test_callable_usecols(self):
# See gh-14154
s = '''AaA,bBb,CCC,ddd
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'AaA': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'bBb': {0: 8, 1: 2, 2: 7},
'ddd': {0: 'a', 1: 'b', 2: 'a'}
}
expected = DataFrame(data)
df = self.read_csv(StringIO(s), usecols=lambda x:
x.upper() in ['AAA', 'BBB', 'DDD'])
tm.assert_frame_equal(df, expected)
# Check that a callable returning only False returns
# an empty DataFrame
expected = DataFrame()
df = self.read_csv(StringIO(s), usecols=lambda x: False)
tm.assert_frame_equal(df, expected)
def test_incomplete_first_row(self):
# see gh-6710
data = '1,2\n1,2,3'
names = ['a', 'b', 'c']
expected = DataFrame({'a': [1, 1],
'c': [np.nan, 3]})
usecols = ['a', 'c']
df = self.read_csv(StringIO(data), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = lambda x: x in ['a', 'c']
df = self.read_csv(StringIO(data), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_uneven_length_cols(self):
# see gh-8985
usecols = [0, 1, 2]
data = '19,29,39\n' * 2 + '10,20,30,40'
expected = DataFrame([[19, 29, 39],
[19, 29, 39],
[10, 20, 30]])
df = self.read_csv(StringIO(data), header=None, usecols=usecols)
tm.assert_frame_equal(df, expected)
# see gh-9549
usecols = ['A', 'B', 'C']
data = ('A,B,C\n1,2,3\n3,4,5\n1,2,4,5,1,6\n'
'1,2,3,,,1,\n1,2,3\n5,6,7')
expected = DataFrame({'A': [1, 3, 1, 1, 1, 5],
'B': [2, 4, 2, 2, 2, 6],
'C': [3, 5, 4, 3, 3, 7]})
df = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(df, expected)
|
agpl-3.0
|
mjudsp/Tsallis
|
examples/ensemble/plot_gradient_boosting_oob.py
|
50
|
4764
|
"""
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross-validation, which
usually gives a better estimate of the test loss but is computationally
more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
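# A minimal sketch of the quantity demonstrated below (assumption: any binary
# classification data X, y would do): with ``subsample < 1.0`` the fitted
# model exposes ``oob_improvement_``, one OOB loss-improvement estimate per
# boosting stage. The helper is defined for illustration only and never called.
def _oob_improvements_sketch(X, y):
    clf = ensemble.GradientBoostingClassifier(n_estimators=50, subsample=0.5,
                                              random_state=0)
    clf.fit(X, y)
    return clf.oob_improvement_  # shape (n_estimators,)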
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv.split(X_train, y_train):
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate the best n_estimators using cross-validation
cv_score = cv_estimate(3)
# Compute the deviance on the test data at each boosting iteration
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
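# (oob_improvement_[i] is the OOB estimate of the drop in loss at stage i, so
# this negative cumulative sum tracks the loss curve up to an additive
# constant and can be compared with the normalized test/CV curves below)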
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
|
bsd-3-clause
|
TomAugspurger/pandas
|
pandas/tests/extension/base/missing.py
|
4
|
4515
|
import numpy as np
import pandas as pd
import pandas._testing as tm
from .base import BaseExtensionTests
class BaseMissingTests(BaseExtensionTests):
def test_isna(self, data_missing):
expected = np.array([True, False])
result = pd.isna(data_missing)
tm.assert_numpy_array_equal(result, expected)
result = pd.Series(data_missing).isna()
expected = pd.Series(expected)
self.assert_series_equal(result, expected)
# GH 21189
result = pd.Series(data_missing).drop([0, 1]).isna()
expected = pd.Series([], dtype=bool)
self.assert_series_equal(result, expected)
def test_dropna_array(self, data_missing):
result = data_missing.dropna()
expected = data_missing[[1]]
self.assert_extension_array_equal(result, expected)
def test_dropna_series(self, data_missing):
ser = pd.Series(data_missing)
result = ser.dropna()
expected = ser.iloc[[1]]
self.assert_series_equal(result, expected)
def test_dropna_frame(self, data_missing):
df = pd.DataFrame({"A": data_missing})
# defaults
result = df.dropna()
expected = df.iloc[[1]]
self.assert_frame_equal(result, expected)
# axis = 1
result = df.dropna(axis="columns")
expected = pd.DataFrame(index=[0, 1])
self.assert_frame_equal(result, expected)
# multiple
df = pd.DataFrame({"A": data_missing, "B": [1, np.nan]})
result = df.dropna()
expected = df.iloc[:0]
self.assert_frame_equal(result, expected)
def test_fillna_scalar(self, data_missing):
valid = data_missing[1]
result = data_missing.fillna(valid)
expected = data_missing.fillna(valid)
self.assert_extension_array_equal(result, expected)
def test_fillna_limit_pad(self, data_missing):
arr = data_missing.take([1, 0, 0, 0, 1])
result = pd.Series(arr).fillna(method="ffill", limit=2)
expected = pd.Series(data_missing.take([1, 1, 1, 0, 1]))
self.assert_series_equal(result, expected)
def test_fillna_limit_backfill(self, data_missing):
arr = data_missing.take([1, 0, 0, 0, 1])
result = pd.Series(arr).fillna(method="backfill", limit=2)
expected = pd.Series(data_missing.take([1, 0, 1, 1, 1]))
self.assert_series_equal(result, expected)
def test_fillna_series(self, data_missing):
fill_value = data_missing[1]
ser = pd.Series(data_missing)
result = ser.fillna(fill_value)
expected = pd.Series(
data_missing._from_sequence(
[fill_value, fill_value], dtype=data_missing.dtype
)
)
self.assert_series_equal(result, expected)
# Fill with a series
result = ser.fillna(expected)
self.assert_series_equal(result, expected)
# Fill with a series not affecting the missing values
result = ser.fillna(ser)
self.assert_series_equal(result, ser)
def test_fillna_series_method(self, data_missing, fillna_method):
fill_value = data_missing[1]
if fillna_method == "ffill":
data_missing = data_missing[::-1]
result = pd.Series(data_missing).fillna(method=fillna_method)
expected = pd.Series(
data_missing._from_sequence(
[fill_value, fill_value], dtype=data_missing.dtype
)
)
self.assert_series_equal(result, expected)
def test_fillna_frame(self, data_missing):
fill_value = data_missing[1]
result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value)
expected = pd.DataFrame(
{
"A": data_missing._from_sequence(
[fill_value, fill_value], dtype=data_missing.dtype
),
"B": [1, 2],
}
)
self.assert_frame_equal(result, expected)
def test_fillna_fill_other(self, data):
result = pd.DataFrame({"A": data, "B": [np.nan] * len(data)}).fillna({"B": 0.0})
expected = pd.DataFrame({"A": data, "B": [0.0] * len(result)})
self.assert_frame_equal(result, expected)
def test_use_inf_as_na_no_effect(self, data_missing):
ser = pd.Series(data_missing)
expected = ser.isna()
with pd.option_context("mode.use_inf_as_na", True):
result = ser.isna()
self.assert_series_equal(result, expected)
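# A minimal sketch of how this mixin is typically consumed (assumptions: a
# concrete ExtensionArray with ``data``, ``data_missing`` and ``fillna_method``
# pytest fixtures defined alongside the subclass):
#
#     class TestMyArrayMissing(BaseMissingTests):
#         pass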
|
bsd-3-clause
|
sonnyhu/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
1
|
25481
|
# Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
    # Test Lasso on a toy example for various values of alpha.
    # When validating this against glmnet, note that glmnet divides the
    # objective by nobs (the number of observations).
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
    # Test ElasticNet for various values of alpha and l1_ratio.
    # Strictly speaking, alpha = 0 should not be allowed, but we test it
    # here as a borderline case.
    # ElasticNet is tested with and without a precomputed Gram matrix.
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
    # for this we check that the alphas they select are no more than one
    # position apart in the clf.alphas_ grid
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_) -
np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=50, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=10, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 10, 3), clf.mse_path_.shape)
assert_equal((2, 10), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=10, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((10, 3), clf.mse_path_.shape)
assert_equal(10, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises_regex(ValueError, ".*should be.*True.*False.*auto.*"
"array-like.*Got 'invalid'", clf.fit, X, y)
# Precompute = 'auto' is not supported for ElasticNet
assert_raises_regex(ValueError, ".*should be.*True.*False.*array-like.*"
"Got 'auto'", ElasticNet(precompute='auto').fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
    # Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge
    # faster in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
    # Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong dtype,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
    # With no input checking, providing X in C order should result in an
    # incorrect computation
X = check_array(X, order='C', dtype='float64')
assert_raises(ValueError, clf.fit, X, y, check_input=False)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
def test_lasso_non_float_y():
X = [[0, 0], [1, 1], [-1, -1]]
y = [0, 1, 2]
y_float = [0.0, 1.0, 2.0]
for model in [ElasticNet, Lasso]:
clf = model(fit_intercept=False)
clf.fit(X, y)
clf_float = model(fit_intercept=False)
clf_float.fit(X, y_float)
assert_array_equal(clf.coef_, clf_float.coef_)
|
bsd-3-clause
|
qifeigit/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
57
|
16523
|
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
|
bsd-3-clause
|
vortex-ape/scikit-learn
|
sklearn/tests/test_site_joblib.py
|
5
|
2050
|
import os
import pytest
from sklearn import externals
from sklearn.externals import joblib as joblib_vendored
from sklearn.utils import Parallel, delayed, Memory, parallel_backend
if os.environ.get('SKLEARN_SITE_JOBLIB', False):
import joblib as joblib_site
else:
joblib_site = None
def test_old_pickle(tmpdir):
vendored_joblib_home = os.path.dirname(joblib_vendored.__file__)
sklearn_externals_home = os.path.dirname(externals.__file__)
if not vendored_joblib_home.startswith(sklearn_externals_home):
pytest.skip("joblib is physically unvendored (e.g. as in debian)")
# Check that a pickle that references sklearn.external.joblib can load
f = tmpdir.join('foo.pkl')
f.write(b'\x80\x02csklearn.externals.joblib.numpy_pickle\nNumpyArrayWrappe'
b'r\nq\x00)\x81q\x01}q\x02(U\x05dtypeq\x03cnumpy\ndtype\nq\x04U'
b'\x02i8q\x05K\x00K\x01\x87q\x06Rq\x07(K\x03U\x01<q\x08NNNJ\xff'
b'\xff\xff\xffJ\xff\xff\xff\xffK\x00tq\tbU\x05shapeq\nK\x01\x85q'
b'\x0bU\x05orderq\x0cU\x01Cq\rU\x08subclassq\x0ecnumpy\nndarray\nq'
b'\x0fU\nallow_mmapq\x10\x88ub\x01\x00\x00\x00\x00\x00\x00\x00.',
mode='wb')
joblib_vendored.load(str(f))
def test_site_joblib_dispatch():
if os.environ.get('SKLEARN_SITE_JOBLIB', False):
assert Parallel is joblib_site.Parallel
assert delayed is joblib_site.delayed
assert parallel_backend is joblib_site.parallel_backend
assert Memory is joblib_site.Memory
assert joblib_vendored.Parallel is not joblib_site.Parallel
assert joblib_vendored.delayed is not joblib_site.delayed
assert joblib_vendored.parallel_backend is not \
joblib_site.parallel_backend
assert joblib_vendored.Memory is not joblib_site.Memory
else:
assert Parallel is joblib_vendored.Parallel
assert delayed is joblib_vendored.delayed
assert parallel_backend is joblib_vendored.parallel_backend
assert Memory is joblib_vendored.Memory
|
bsd-3-clause
|
h2educ/scikit-learn
|
examples/cluster/plot_ward_structured_vs_unstructured.py
|
320
|
3369
|
"""
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the positions of its data points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
|
bsd-3-clause
|
NixaSoftware/CVis
|
venv/lib/python2.7/site-packages/pandas/tests/io/parser/test_read_fwf.py
|
11
|
16032
|
# -*- coding: utf-8 -*-
"""
Tests the 'read_fwf' function in parsers.py. This
test suite is independent of the others because the
engine is set to 'python-fwf' internally.
"""
from datetime import datetime
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame
from pandas import compat
from pandas.compat import StringIO, BytesIO
from pandas.io.parsers import read_csv, read_fwf, EmptyDataError
class TestFwfParsing(object):
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = read_csv(StringIO(data_expected),
engine='python', header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From Thomas Kluyver: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assert_raises_regex(ValueError,
"must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assert_raises_regex(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_BytesIO_input(self):
if not compat.PY3:
pytest.skip(
"Bytes-related test - only needs to work on Python 3")
result = read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
def test_fwf_colspecs_is_list_or_tuple(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'column specifications must '
'be a list or tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(data),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'Each column specification '
'must be.+'):
read_fwf(StringIO(data), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
assert len(res)
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn",
"dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
pytest.skip("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = np.array([[1, 2., 4],
[5, np.nan, 10.]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = np.array([[1, 2334., 5],
[10, 13, 10]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_bool_header_arg(self):
# see gh-6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with pytest.raises(TypeError):
read_fwf(StringIO(data), header=arg)
def test_full_file(self):
# File with all values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 Keanu Reeves 9315.45 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 Jennifer Love Hewitt 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65 5000.00 2/5/2007
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = """
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r"""
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~Keanu Reeves
33+++122.33\\\bar.........Gerard Butler
++44~~~~12.01 baz~~Jennifer Love Hewitt
~~55 11+++foo++++Jada Pinkett-Smith
..66++++++.03~~~bar Bill Murray
""".strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
pytest.skip(
'Bytes-related test - only needs to work on Python 3')
test = """
שלום שלום
ום שלל
של ום
""".strip('\r\n')
expected = read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)],
header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(
BytesIO(test.encode('utf8')), header=None, encoding='utf8'))
def test_dtype(self):
data = """ a b c
1 2 3.2
3 4 5.2
"""
colspecs = [(0, 5), (5, 10), (10, None)]
result = pd.read_fwf(StringIO(data), colspecs=colspecs)
expected = pd.DataFrame({
'a': [1, 3],
'b': [2, 4],
'c': [3.2, 5.2]}, columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
expected['a'] = expected['a'].astype('float64')
expected['b'] = expected['b'].astype(str)
expected['c'] = expected['c'].astype('int32')
result = pd.read_fwf(StringIO(data), colspecs=colspecs,
dtype={'a': 'float64', 'b': str, 'c': 'int32'})
tm.assert_frame_equal(result, expected)
def test_skiprows_inference(self):
# GH11256
test = """
Text contained in the file header
DataCol1 DataCol2
0.0 1.0
101.6 956.1
""".strip()
expected = read_csv(StringIO(test), skiprows=2,
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=2))
def test_skiprows_by_index_inference(self):
test = """
To be skipped
Not To Be Skipped
Once more to be skipped
123 34 8 123
456 78 9 456
""".strip()
expected = read_csv(StringIO(test), skiprows=[0, 2],
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=[0, 2]))
def test_skiprows_inference_empty(self):
test = """
AA BBB C
12 345 6
78 901 2
""".strip()
with pytest.raises(EmptyDataError):
read_fwf(StringIO(test), skiprows=3)
def test_whitespace_preservation(self):
# Addresses Issue #16772
data_expected = """
a ,bbb
cc,dd """
expected = read_csv(StringIO(data_expected), header=None)
test_data = """
a bbb
ccdd """
result = read_fwf(StringIO(test_data), widths=[3, 3],
header=None, skiprows=[0], delimiter="\n\t")
tm.assert_frame_equal(result, expected)
def test_default_delimiter(self):
data_expected = """
a,bbb
cc,dd"""
expected = read_csv(StringIO(data_expected), header=None)
test_data = """
a \tbbb
cc\tdd """
result = read_fwf(StringIO(test_data), widths=[3, 3],
header=None, skiprows=[0])
tm.assert_frame_equal(result, expected)
|
apache-2.0
|
manashmndl/scikit-learn
|
benchmarks/bench_plot_lasso_path.py
|
301
|
4003
|
"""Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to since surface plots do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
|
bsd-3-clause
|
ApolloAuto/apollo
|
modules/tools/prediction/data_pipelines/cruise_models.py
|
3
|
5275
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import logging
import os
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader, sampler
import h5py
import numpy as np
import sklearn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from modules.tools.prediction.data_pipelines.common.configure import parameters
from proto.cruise_model_pb2 import TensorParameter, InputParameter,\
Conv1dParameter, DenseParameter, ActivationParameter, MaxPool1dParameter,\
AvgPool1dParameter, LaneFeatureConvParameter, ObsFeatureFCParameter,\
ClassifyParameter, RegressParameter, CruiseModelParameter
"""
@requirement:
pytorch 0.4.1
"""
'''
This file includes all model definitions and related loss functions.
'''
'''
Model details:
- Fully-connected layers for classification and regression, respectively.
- It will compute a classification score indicating the probability
of the obstacle choosing the given lane.
- It will also compute a time indicating how soon the obstacle will reach
the center of the given lane.
'''
class FullyConn_NN(torch.nn.Module):
def __init__(self):
super(FullyConn_NN, self).__init__()
self.classify = torch.nn.Sequential(
nn.Linear(174, 88),
nn.Sigmoid(),
nn.Dropout(0.3),
nn.Linear(88, 55),
nn.Sigmoid(),
nn.Dropout(0.2),
nn.Linear(55, 23),
nn.Sigmoid(),
nn.Dropout(0.3),
nn.Linear(23, 10),
nn.Sigmoid(),
nn.Dropout(0.0),
nn.Linear(10, 1),
nn.Sigmoid()
)
self.regress = torch.nn.Sequential(
nn.Linear(174, 88),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(88, 23),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(23, 1),
nn.ReLU()
)
def forward(self, x):
out_c = self.classify(x)
out_r = self.regress(x)
return out_c, out_r
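# Editor's usage sketch (illustrative only, not part of the original file):
# a single forward pass through FullyConn_NN on a random batch, showing the
# two heads it returns.
#
#   model = FullyConn_NN()
#   x = torch.randn(8, 174)           # batch of 8 feature vectors of width 174
#   prob, time_to_center = model(x)   # lane-choice score and time-to-lane-center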
class FCNN_CNN1D(torch.nn.Module):
def __init__(self):
super(FCNN_CNN1D, self).__init__()
self.lane_feature_conv = torch.nn.Sequential(
nn.Conv1d(4, 10, 3, stride=1),\
# nn.BatchNorm1d(10),\
nn.ReLU(),\
#nn.Conv1d(10, 16, 3, stride=2),\
# nn.BatchNorm1d(16),\
# nn.ReLU(),\
nn.Conv1d(10, 25, 3, stride=2),\
# nn.BatchNorm1d(25)
)
self.lane_feature_maxpool = nn.MaxPool1d(4)
self.lane_feature_avgpool = nn.AvgPool1d(4)
self.lane_feature_dropout = nn.Dropout(0.0)
self.obs_feature_fc = torch.nn.Sequential(
nn.Linear(68, 40),
nn.Sigmoid(),
nn.Dropout(0.0),
nn.Linear(40, 24),
nn.Sigmoid(),
nn.Dropout(0.0),
)
self.classify = torch.nn.Sequential(
nn.Linear(124, 66),
nn.Sigmoid(),
nn.Dropout(0.3),
nn.Linear(66, 48),
nn.Sigmoid(),
nn.Dropout(0.1),
nn.Linear(48, 11),
nn.Sigmoid(),
nn.Dropout(0.1),
nn.Linear(11, 1),\
# nn.Sigmoid()
)
self.regress = torch.nn.Sequential(
nn.Linear(125, 77),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(77, 46),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(46, 12),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(12, 1),
nn.ReLU()
)
def forward(self, x):
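        # Input layout used below: the last 80 columns are lane features,
        # reshaped to (batch, 4, 20) for the 1-D convolutions; the remaining
        # 68 columns are obstacle features fed to obs_feature_fc (Linear(68, 40)).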
lane_fea = x[:, -80:]
lane_fea = lane_fea.view(lane_fea.size(0), 4, 20)
obs_fea = x[:, :-80]
lane_fea = self.lane_feature_conv(lane_fea)
lane_fea_max = self.lane_feature_maxpool(lane_fea)
lane_fea_avg = self.lane_feature_avgpool(lane_fea)
lane_fea = torch.cat([lane_fea_max.view(lane_fea_max.size(0), -1),
lane_fea_avg.view(lane_fea_avg.size(0), -1)], 1)
lane_fea = self.lane_feature_dropout(lane_fea)
obs_fea = self.obs_feature_fc(obs_fea)
tot_fea = torch.cat([lane_fea, obs_fea], 1)
out_c = self.classify(tot_fea)
out_r = self.regress(torch.cat([tot_fea, out_c], 1))
return out_c, out_r
|
apache-2.0
|
felipemontefuscolo/bitme
|
tactic/TacticTakerV1.py
|
1
|
7357
|
import pandas as pd
from typing import List
from api import ExchangeInterface, Symbol
from common import Fill, OrderCommon, Quote, Trade, OrderType, FillType
from tactic import TacticInterface, PositionSim
import math
class TacticTakerV1(TacticInterface):
def __init__(self):
self.exchange = None # type: ExchangeInterface
self.max_qty = 10000
self.min_liq = 10000 # doesn't make much sense when we don't have the full book available
self.alpha = 1 # qty / price
self.leverage = 1
self.symbol = Symbol.XBTUSD
self.risk = 0.001 # percent
self.spread = 3.5
# buy, sell
self.buy_order = None # type: OrderCommon
self.sell_order = None # type: OrderCommon
# lower, upper
self.last_quote = None # type: Quote
self.rest = 0 # type: int
self.learning = 0 # unit of position
# this is to keep track of avg_entry_price
self.position = PositionSim(self.symbol, on_position_close=None)
# this should be None when self.position is not open
self.liq_price = None
"""
price = fcst - pos / a
buy = min( buy_best + 0.5, floor(fcst - 0.25))
sel = min( sel_best - 0.5, ceil(fcst + 0.25))
qty_buy = min(max_qty, round((price - buy) * a))
qty_sel = min(max_qty, round((sel - price) * a))
risk:
liq_price = e / (1 + side * risk)
"""
@staticmethod
def _floor(x):
return math.floor(2. * x) / 2
@staticmethod
def _ceil(x):
return math.ceil(2. * x) / 2
def initialize(self, exchange: ExchangeInterface, preferences: dict) -> None:
self.exchange = exchange
def pref(x):
#return '/{}/{}'.format(self.id(), x)
return x
if pref('max_qty') in preferences:
self.max_qty = int(preferences[pref('max_qty')])
if pref('min_liq') in preferences:
self.min_liq = int(preferences[pref('min_liq')])
if pref('alpha') in preferences:
self.alpha = float(preferences[pref('alpha')])
if pref('spread') in preferences:
self.spread = self._floor(float(preferences[pref('spread')]))
if pref('risk') in preferences:
self.risk = float(preferences[pref('risk')])
pass
def finalize(self) -> None:
pass
def get_symbol(self) -> Symbol:
return self.symbol
def _create_orders(self, learning: float, fcst: float, quote: Quote):
# TODO: improve to use full book information
buy_best = quote.bid_price if quote.bid_size >= self.min_liq else quote.bid_price - 0.5
sell_best = quote.ask_price if quote.ask_size >= self.min_liq else quote.ask_price + 0.5
buy_best -= self.spread
sell_best += self.spread
price = fcst - learning / self.alpha
buy = min(buy_best - 0.5, self._floor(price - 0.25))
sell = max(sell_best + 0.5, self._ceil(price + 0.25))
qty_buy = min(self.max_qty, round((price - buy) * self.alpha))
qty_sel = min(self.max_qty, round((sell - price) * self.alpha))
# TODO: improve quantities
qty_buy = min(qty_buy, qty_sel)
qty_sel = qty_buy
if qty_buy < 1:
self.learning = 0
if qty_buy >= 1:
self.buy_order = OrderCommon(symbol=self.symbol,
type=OrderType.Limit,
client_id=self.gen_order_id(),
signed_qty=qty_buy,
price=buy)
if qty_sel >= 1:
self.sell_order = OrderCommon(symbol=self.symbol,
type=OrderType.Limit,
client_id=self.gen_order_id(),
signed_qty=-qty_sel,
price=sell)
def _send_orders(self):
# TODO: create a way to reuse open orders
orders = []
if self.buy_order:
orders.append(self.buy_order)
if self.sell_order:
orders.append(self.sell_order)
if orders:
self.exchange.send_orders(orders)
def _liq_pos(self, pos: float):
self.exchange.send_orders([OrderCommon(symbol=self.symbol,
type=OrderType.Market,
client_id=self.gen_order_id(),
signed_qty=-pos)])
# override
def gen_order_id(self, suffix=None) -> str:
if suffix:
return super().gen_order_id() + suffix
else:
return super().gen_order_id()
def handle_trade(self, trade: Trade) -> None:
if not self.last_quote:
return
if self.rest > 0:
self.rest -= 1
return
if not self.buy_order and not self.sell_order:
fcst = (self.last_quote.bid_price + self.last_quote.ask_price) * 0.5
if self.position.signed_qty != 0:
raise ValueError("self.position.signed_qty is {}".format(self.position.signed_qty))
self._create_orders(learning=self.learning, fcst=fcst, quote=self.last_quote)
self._send_orders()
else:
side = self.position.side
should_liq = self.liq_price is not None and trade.price * side < self.liq_price * side
if should_liq:
self.learning += self.position.signed_qty
self._cancel_all()
self.exchange.close_position(self.symbol)
self.rest = 3
pass
def _cancel_all(self):
self.buy_order = None
self.sell_order = None
self.exchange.cancel_all_orders(self.symbol)
self.position.reset()
self.liq_price = None
def handle_quote(self, quote: Quote) -> None:
self.last_quote = quote
def handle_fill(self, fill: Fill) -> None:
if fill.side[0] == 'B':
side = +1
elif fill.side[0] == 'S':
side = -1
else:
raise ValueError("Invalid fill side: {}".format(fill.side))
signed_qty = fill.qty * side
self.position.update(signed_qty=signed_qty,
price=fill.price,
leverage=self.leverage,
current_timestamp=fill.fill_time,
fee=0.)
if self.position.is_open:
self.liq_price = self.position.avg_entry_price / (1 + self.position.side * self.risk)
else:
self.liq_price = None
if fill.fill_type == FillType.complete:
if side > 0:
self.buy_order = None
else:
self.sell_order = None
return
def handle_1m_candles(self, candles: pd.DataFrame) -> None:
pass
def handle_cancel(self, order: OrderCommon) -> None:
self._cancel_all()
self.rest = 3
def handle_liquidation(self, pnl: float):
raise AttributeError("This tactic should liquidate before bitmex liquidation")
@staticmethod
def id() -> str:
return 'TTV1'
|
mpl-2.0
|
RayMick/scikit-learn
|
benchmarks/bench_rcv1_logreg_convergence.py
|
149
|
7173
|
# Authors: Tom Dupre la Tour <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
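# i.e. loss(w, b) = mean_i(log(1 + exp(-y_i * (x_i . w + b)))) + ||w||^2 / (2 * C * n_samples)
# (editor's note: formula restated from the function below for readability)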
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
    # compute the same step_size as in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
|
bsd-3-clause
|
jreeder/avoplot
|
src/avoplot/gui/text.py
|
3
|
20216
|
#Copyright (C) Nial Peters 2013
#
#This file is part of AvoPlot.
#
#AvoPlot is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#AvoPlot is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with AvoPlot. If not, see <http://www.gnu.org/licenses/>.
"""
The text module contains classes and functions for editing matplotlib Text
objects
"""
import wx
import matplotlib.text
from avoplot.gui import dialog
class AnimatedText:
def __init__(self, text_objects):
"""
Class to animate matplotlib.text.Text objects to allow fast editing of
        text properties. The general usage of the class is as follows:
#create the animator passing it the text objects to be animated
animator = AnimatedText([txt1, txt2])
#start the animation - this caches the background and registers an event
#handler to re-cache the background if it gets changed
animator.start_text_animation()
#make changes to the text here e.g.:
txt1.set_color('r')
#each time you want the changes to be drawn - call redraw_text()
animator.redraw_text()
#don't forget to end the animation when you are done with it!
        animator.stop_text_animation()
"""
#if a single Text object has been passed, wrap in a list
if isinstance(text_objects, matplotlib.text.Text):
text_objects = [text_objects]
for i,text_obj in enumerate(text_objects):
if not isinstance(text_obj, matplotlib.text.Text):
raise TypeError("At index %d: expecting matplotlib.text.Text instance"
", got %s" % (i,type(text_obj)))
#all the text objects must be in the same figure - it's hard to see why
#this wouldn't be the case, but just in case
figs = set([t.get_figure() for t in text_objects])
assert len(figs) == 1, "All Text objects must be in the same Figure."
self.__text_objects = text_objects
self.__redraw_callback_id = None
self.__bkgd_cache = None
self.__mpl_fig = self.__text_objects[0].get_figure()
def start_text_animation(self):
"""
Start the animation of the text. This must be called before you call
redraw_text. This creates a cache of the figure background and registers
a callback to update the cache if the background gets changed.
"""
#we have to protect against this method being called again
#before the CallAfter call has completed
if self.__redraw_callback_id is None:
self.__redraw_callback_id = -1
wx.CallAfter(self.__start_text_animation)
def __start_text_animation(self):
self.__caching_in_progress = False
#register a callback for draw events in the mpl canvas - if the canvas
#has been redrawn then we need to re-cache the background region
self.__redraw_callback_id = self.__mpl_fig.canvas.mpl_connect('draw_event', self.__cache_bkgd)
#now cache the background region
self.__cache_bkgd()
def stop_text_animation(self):
"""
Deletes the background cache and removes the update callback. Should be
called whenever you are finished animating the text
"""
assert self.__redraw_callback_id is not None, ("stop_text_animation() "
"called before "
"start_text_animation()")
#disconnect the event handler for canvas draw events
self.__mpl_fig.canvas.mpl_disconnect(self.__redraw_callback_id)
self.__redraw_callback_id = None
#let the cached background get garbage collected
self.__bkgd_cache = None
def __cache_bkgd(self, *args):
#This method is a bit of a hack! Since canvas.draw still renders some
#matplotlib artists even if they are marked as animated, we instead
#set the alpha value of the artists to zero, then draw everything and
#store the resulting canvas in a cache which can be used later to
#restore regions of the background. The *real* alpha values of the
#artists are then restored and the canvas is redrawn. This causes flicker
#of the animated artists - but at least it seems to work.
if self.__caching_in_progress:
self.__caching_in_progress = False
return
self.__caching_in_progress = True
#record the alpha values of the text objects so they can be restored
prev_alphas = [t.get_alpha() for t in self.__text_objects]
#hide the text objects by setting their alpha values to zero - note that
#using set_visible(False) instead leads to problems with layout.
for t in self.__text_objects:
t.set_alpha(0)
self.__mpl_fig.canvas.draw()
self.__bkgd_cache = self.__mpl_fig.canvas.copy_from_bbox(self.__mpl_fig.bbox)
#now unhide the text by restoring its alpha value
for i,t in enumerate(self.__text_objects):
t.set_alpha(prev_alphas[i])
self.__mpl_fig.draw_artist(t)
self.__mpl_fig.canvas.blit(self.__mpl_fig.bbox)
def redraw_text(self):
"""
Restores the whole background region from the cache and then draws the
animated text objects over the top. You should call this every time you
change the text and want the changes to be drawn to the screen.
"""
assert self.__bkgd_cache is not None, ("redraw_text() called before "
"__bkgd_cache was set. Have you "
"called start_text_animation()?")
#restore the whole figure background from the cached background
self.__mpl_fig.canvas.restore_region(self.__bkgd_cache)
#now draw just the text objects (which have changed)
for t in self.__text_objects:
self.__mpl_fig.draw_artist(t)
#blit the updated display to the screen
self.__mpl_fig.canvas.blit(self.__mpl_fig.bbox)
class TextPropertiesEditor(dialog.AvoPlotDialog):
"""
Dialog which allows the user to edit the text properties (colour, font etc.)
of a set of matplotlib.text.Text objects. The Text objects to be edited should be
passed to the dialog constructor - which will accept either a single Text
object or an iterable of Text objects
"""
def __init__(self, parent, text_objects):
#if a single Text object has been passed, wrap in a list
if isinstance(text_objects, matplotlib.text.Text):
text_objects = [text_objects]
for i,text_obj in enumerate(text_objects):
if not isinstance(text_obj, matplotlib.text.Text):
raise TypeError("At index %d: expecting matplotlib.text.Text instance"
", got %s" % (i,type(text_obj)))
self.__text_objects = text_objects
dialog.AvoPlotDialog.__init__(self, parent, "Text properties")
vsizer = wx.BoxSizer(wx.VERTICAL)
button_sizer = wx.BoxSizer(wx.HORIZONTAL)
#add the font properties panel
self.font_props_panel = FontPropertiesPanel(self, text_objects)
vsizer.Add(self.font_props_panel, 0, wx.EXPAND)
#create main buttons for editor frame
self.ok_button = wx.Button(self, wx.ID_ANY, "Ok")
button_sizer.Add(self.ok_button, 0, wx.ALIGN_TOP | wx.ALIGN_RIGHT)
vsizer.Add(button_sizer, 0, wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT | wx.ALL, border=10)
#register main button event callbacks
wx.EVT_BUTTON(self, self.ok_button.GetId(), self.on_ok)
wx.EVT_CLOSE(self, self.on_close)
self.SetSizer(vsizer)
vsizer.Fit(self)
self.SetAutoLayout(True)
self.CentreOnParent()
self.ShowModal()
def on_close(self, evnt):
"""
Event handler for frame close events
"""
self.EndModal(wx.ID_CANCEL)
self.font_props_panel.Destroy()
self.Destroy()
def on_apply(self, evnt):
"""
Event handler for Apply button clicks
"""
for txt_obj in self.__text_objects:
self.apply_to(txt_obj)
def on_ok(self, evnt):
"""
Event handler for Ok button clicks.
"""
#self.apply_to(self.main_text_obj)
self.EndModal(wx.ID_OK)
self.font_props_panel.Destroy()
self.Destroy()
def apply_to(self, text_obj):
"""
Applies the selected properties to text_obj which must be a
matplotlib.text.Text object
"""
if not isinstance(text_obj, matplotlib.text.Text):
raise TypeError("Expecting matplotlib.text.Text instance"
", got %s" % (type(text_obj)))
#set the font
text_obj.set_family(self.font_props_panel.get_font_name())
#set font colour
text_obj.set_color(self.font_props_panel.get_font_colour())
#set font size
text_obj.set_size(self.font_props_panel.get_font_size())
#set the weight
text_obj.set_weight(self.font_props_panel.get_font_weight())
#set the style
text_obj.set_style(self.font_props_panel.get_font_style())
#set the font stretch
text_obj.set_stretch(self.font_props_panel.get_font_stretch())
#update the display
text_obj.figure.canvas.draw()
class FontPropertiesPanel(wx.Panel):
"""
Panel to hold the text property editing controls within the
TextPropertiesEditor dialog. The Text objects to be edited should be passed
to the constructor.
Note that this panel does not use blitting methods to get fast text
animation because that causes layout problems for certain text objects -
e.g. the layout of axis labels and tick-labels are related to each other,
and this is not honoured properly if blitting methods are used.
"""
def __init__(self, parent, text_objects):
wx.Panel.__init__(self, parent, wx.ID_ANY)
self.__text_objects = text_objects
self.mpl_figure = text_objects[0].get_figure()
hsizer = wx.BoxSizer(wx.HORIZONTAL)
grid_sizer = wx.FlexGridSizer(cols=2, vgap=5)
#create a list of available truetype fonts on this system
self.avail_fonts = sorted(list(set([f.name for f in matplotlib.font_manager.fontManager.ttflist])))
#create a font selection listbox
self.font_selector = wx.ListBox(self, wx.ID_ANY, choices=self.avail_fonts)
hsizer.Add(self.font_selector, 1, wx.ALIGN_TOP | wx.ALIGN_LEFT | wx.ALL,
border=10)
#set the initial font selection to that of the text object
cur_fonts = list(set([t.get_fontname() for t in text_objects]))
if len(cur_fonts) == 1:
self.font_selector.SetSelection(self.avail_fonts.index(cur_fonts[0]))
#create a colour picker button
text = wx.StaticText(self, -1, "Colour: ")
grid_sizer.Add(text, 0, wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_RIGHT)
wx.EVT_LISTBOX(self, self.font_selector.GetId(), self.on_font_selection)
#set the colour picker's initial value to that of the text object
prev_cols = [matplotlib.colors.colorConverter.to_rgb(t.get_color()) for t in text_objects]
#TODO - what if the text objects have different colors
prev_col = (255 * prev_cols[0][0], 255 * prev_cols[0][1], 255 * prev_cols[0][2])
self.colour_picker = wx.ColourPickerCtrl(self, -1, prev_col)
grid_sizer.Add(self.colour_picker, 0 ,
wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_LEFT)
wx.EVT_COLOURPICKER_CHANGED(self, self.colour_picker.GetId(), self.on_font_colour)
#create a font size control and set the initial value to that of the text
text = wx.StaticText(self, -1, "Size: ")
grid_sizer.Add(text, 0, wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_RIGHT)
prev_size = [t.get_size() for t in text_objects][0]
#TODO - what if the text objects have different sizes
self.size_ctrl = wx.SpinCtrl(self, wx.ID_ANY, min=4, max=100,
initial=prev_size)
grid_sizer.Add(self.size_ctrl, 0 , wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_LEFT)
wx.EVT_SPINCTRL(self, self.size_ctrl.GetId(), self.on_font_size)
#create a drop-down box for specifying font weight
self.possible_weights = ['ultralight', 'light', 'normal', 'regular',
'book', 'medium', 'roman', 'semibold',
'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black']
text = wx.StaticText(self, -1, "Weight: ")
grid_sizer.Add(text, 0, wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_RIGHT)
self.weight_ctrl = wx.Choice(self, wx.ID_ANY, choices=self.possible_weights)
grid_sizer.Add(self.weight_ctrl, 0, wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_LEFT)
wx.EVT_CHOICE(self, self.weight_ctrl.GetId(), self.on_font_weight)
#set the initial font weight selection to that of the text, this is a
#bit tricky since get_weight can return an integer or a string
cur_weight = [t.get_weight() for t in text_objects][0]
#TODO - what if the text objects have different weights
if not type(cur_weight) is str:
idx = int(round(cur_weight / 1000.0 * len(self.possible_weights), 0))
else:
idx = self.possible_weights.index(cur_weight)
self.weight_ctrl.SetSelection(idx)
#create a drop down box for specifying font style
self.possible_styles = ['normal', 'italic', 'oblique']
text = wx.StaticText(self, -1, "Style: ")
grid_sizer.Add(text, 0, wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_RIGHT)
self.style_ctrl = wx.Choice(self, wx.ID_ANY, choices=self.possible_styles)
grid_sizer.Add(self.style_ctrl, 0 , wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_LEFT)
wx.EVT_CHOICE(self, self.style_ctrl.GetId(), self.on_font_style)
#set the initial font style selection to that of the text
cur_style = [t.get_style() for t in text_objects][0]
#TODO - what if the text objects have different styles
idx = self.possible_styles.index(cur_style)
self.style_ctrl.SetSelection(idx)
#create a drop down box for selecting font stretch
self.possible_stretches = ['ultra-condensed', 'extra-condensed',
'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded',
'ultra-expanded']
text = wx.StaticText(self, -1, "Stretch: ")
grid_sizer.Add(text, 0, wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_RIGHT)
self.stretch_ctrl = wx.Choice(self, wx.ID_ANY, choices=self.possible_stretches)
grid_sizer.Add(self.stretch_ctrl, 0 , wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_LEFT)
wx.EVT_CHOICE(self, self.stretch_ctrl.GetId(), self.on_font_stretch)
#set the initial font stretch selection to that of the text, this is a
#bit tricky since get_stretch can return an integer or a string
cur_stretch = [t.get_stretch() for t in text_objects][0]
#TODO - what if the text objects have different stretches
if not type(cur_stretch) is str:
idx = int(round(cur_stretch / 1000.0 * len(self.possible_stretches), 0))
else:
idx = self.possible_stretches.index(cur_stretch)
self.stretch_ctrl.SetSelection(idx)
hsizer.Add(grid_sizer, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT | wx.ALL,
border=10)
self.SetSizer(hsizer)
hsizer.Fit(self)
self.SetAutoLayout(True)
def redraw_text(self):
"""
Redraws the text - does not use blitting for fast animation because that
causes layout problems in some cases.
"""
self.mpl_figure.canvas.draw()
def on_font_selection(self, evnt):
"""
Event handler for font selection events.
"""
new_font = evnt.GetString()
for t in self.__text_objects:
t.set_family(new_font)
self.redraw_text()
def on_font_colour(self, evnt):
"""
Event handler for font colour change events.
"""
new_colour = evnt.GetColour().GetAsString(wx.C2S_HTML_SYNTAX)
for t in self.__text_objects:
t.set_color(new_colour)
self.redraw_text()
def on_font_size(self, evnt):
"""
Event handler for font size change events.
"""
new_size = evnt.GetInt()
for t in self.__text_objects:
t.set_size(new_size)
self.redraw_text()
def on_font_weight(self, evnt):
"""
Event handler for font weight (e.g. bold) change events.
"""
new_weight = self.possible_weights[evnt.GetSelection()]
for t in self.__text_objects:
t.set_weight(new_weight)
self.redraw_text()
def on_font_style(self, evnt):
"""
Event handler for font style (e.g. italic) change events.
"""
new_style = evnt.GetString()
for t in self.__text_objects:
t.set_style(new_style)
self.redraw_text()
def on_font_stretch(self, evnt):
"""
Event handler for font stretch (e.g. compressed) change events.
"""
new_stretch = evnt.GetString()
for t in self.__text_objects:
t.set_stretch(new_stretch)
self.redraw_text()
def get_font_colour(self):
"""
Returns the currently selected font colour (as a HTML string).
"""
return self.colour_picker.GetColour().GetAsString(wx.C2S_HTML_SYNTAX)
def get_font_size(self):
"""
Returns the currently selected font size (integer number of points)
"""
return self.size_ctrl.GetValue()
def get_font_name(self):
"""
Returns the name of the currently selected font.
"""
return self.avail_fonts[self.font_selector.GetSelection()]
def get_font_weight(self):
"""
Returns the weight of the font currently selected
"""
return self.possible_weights[self.weight_ctrl.GetSelection()]
def get_font_style(self):
"""
Returns the style ('normal', 'italic' etc.) of the font currently
selected
"""
return self.possible_styles[self.style_ctrl.GetSelection()]
def get_font_stretch(self):
"""
Returns the stretch of the font currently selected
"""
return self.possible_stretches[self.stretch_ctrl.GetSelection()]
|
gpl-3.0
|
jason-neal/companion_simulations
|
Notebooks/Spectrum_Normalizations.py
|
1
|
14459
|
# coding: utf-8
# # Spectrum Continuum Normalization
#
# ## Aim:
# - To perform Chi^2 comparison between PHOENIX ACES spectra and my CRIRES observations.
#
# ## Problem:
# - The normalization of the observed spectra
# - Differences in the continuum normalization affect the chi^2 comparison when using mixed models of two different spectra.
#
# Proposed Solution:
# - equation (1) from [Passegger 2016](https://arxiv.org/pdf/1601.01877.pdf)
# F_obs = F_obs * (cont_fit model / cont_fit observation), where cont_fit is a linear fit to the spectrum.
# This takes out any linear trends in the continua and corrects the amplitude of the continuum.
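#
# As a minimal sketch (not part of the original notebook), equation (1) can be written as a
# small helper. It assumes `cont_fit_model` and `cont_fit_obs` are the fitted continua already
# evaluated on the observed wavelength grid (e.g. from numpy.polyfit); the function name is new.

# In[ ]:

import numpy as np

def passegger_renorm(obs_flux, cont_fit_model, cont_fit_obs):
    """Rescale the observed flux by the ratio of the model and observed continuum fits."""
    return np.asarray(obs_flux) * (np.asarray(cont_fit_model) / np.asarray(cont_fit_obs))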
#
#
# In this notebook I outline what I do currently, showing an example.
#
#
#
#
# In[1]:
import copy
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
#%matplotlib auto
# The observations were originally automatically continuum normalized in the IRAF extraction pipeline.
#
# I believe the continuum is not quite at 1 here anymore due to the division by the telluric spectra.
# In[2]:
# Observation
obs = fits.getdata("/home/jneal/.handy_spectra/HD211847-1-mixavg-tellcorr_1.fits")
plt.plot(obs["wavelength"], obs["flux"])
plt.hlines(1, 2111, 2124, linestyle="--")
plt.title("CRIRES spectra")
plt.xlabel("Wavelength (nm)")
plt.show()
# The two PHOENIX ACES spectra here are the first best guess of the two spectral components.
# In[3]:
# Models
wav_model = fits.getdata("/home/jneal/Phd/data/PHOENIX-ALL/PHOENIX/WAVE_PHOENIX-ACES-AGSS-COND-2011.fits")
wav_model /= 10 # nm
host = "/home/jneal/Phd/data/PHOENIX-ALL/PHOENIX/Z-0.0/lte05700-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
old_companion = "/home/jneal/Phd/data/PHOENIX-ALL/PHOENIX/Z-0.0/lte02600-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
companion = "/home/jneal/Phd/data/PHOENIX-ALL/PHOENIX/Z-0.0/lte02300-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
host_f = fits.getdata(host)
comp_f = fits.getdata(companion)
plt.plot(wav_model, host_f, label="Host")
plt.plot(wav_model, comp_f, label="Companion")
plt.title("Phoenix spectra")
plt.xlabel("Wavelength (nm)")
plt.legend()
plt.show()
mask = (2000 < wav_model) & (wav_model < 2200)
wav_model = wav_model[mask]
host_f = host_f[mask]
comp_f = comp_f[mask]
plt.plot(wav_model, host_f, label="Host")
plt.plot(wav_model, comp_f, label="Companion")
plt.title("Phoenix spectra")
plt.legend()
plt.xlabel("Wavelength (nm)")
plt.show()
# In[ ]:
# # Current Normalization
# I then continuum normalize the Phoenix spectrum locally around my observations
# by fitting an **exponential** to the continuum like so.
#
# - Split the spectrum into 50 bins
# - Take median of 20 highest points in each bin.
# - Fit an exponential
# - Evaluate at the original wavelength values
# - Divide original by the fit
#
# In[4]:
def get_continuum_points(wave, flux, splits=50, top=20):
"""Get continuum points along a spectrum.
This splits a spectrum into "splits" number of bins and calculates
    the median wavelength and flux of the upper "top" number of flux
values.
"""
    # Shorten array until it can be evenly split up.
remainder = len(flux) % splits
if remainder:
        # Nonzero remainder needs this slicing
wave = wave[:-remainder]
flux = flux[:-remainder]
wave_shaped = wave.reshape((splits, -1))
flux_shaped = flux.reshape((splits, -1))
s = np.argsort(flux_shaped, axis=-1)[:, -top:]
s_flux = np.array([ar1[s1] for ar1, s1 in zip(flux_shaped, s)])
s_wave = np.array([ar1[s1] for ar1, s1 in zip(wave_shaped, s)])
wave_points = np.median(s_wave, axis=-1)
flux_points = np.median(s_flux, axis=-1)
assert len(flux_points) == splits
return wave_points, flux_points
def continuum(wave, flux, splits=50, method='scalar', plot=False, top=20):
"""Fit continuum of flux.
    top: the number of highest points in each bin used for the continuum median.
"""
org_wave = wave[:]
org_flux = flux[:]
# Get continuum value in chunked sections of spectrum.
wave_points, flux_points = get_continuum_points(wave, flux, splits=splits, top=top)
poly_num = {"scalar": 0, "linear": 1, "quadratic": 2, "cubic": 3}
if method == "exponential":
z = np.polyfit(wave_points, np.log(flux_points), deg=1, w=np.sqrt(flux_points))
p = np.poly1d(z)
norm_flux = np.exp(p(org_wave)) # Un-log the y values.
else:
z = np.polyfit(wave_points, flux_points, poly_num[method])
p = np.poly1d(z)
norm_flux = p(org_wave)
if plot:
plt.subplot(211)
plt.plot(wave, flux)
plt.plot(wave_points, flux_points, "x-", label="points")
plt.plot(org_wave, norm_flux, label='norm_flux')
plt.legend()
plt.subplot(212)
plt.plot(org_wave, org_flux / norm_flux)
plt.title("Normalization")
plt.xlabel("Wavelength (nm)")
plt.show()
return norm_flux
# In[5]:
#host_cont = local_normalization(wav_model, host_f, splits=50, method="exponential", plot=True)
host_continuum = continuum(wav_model, host_f, splits=50, method="exponential", plot=True)
host_cont = host_f / host_continuum
# In[6]:
#comp_cont = local_normalization(wav_model, comp_f, splits=50, method="exponential", plot=True)
comp_continuum = continuum(wav_model, comp_f, splits=50, method="exponential", plot=True)
comp_cont = comp_f / comp_continuum
# Above, the top panel is the unnormalized spectrum, with the median points in orange and the continuum fit in green. The bottom plot is the continuum normalized result.
# In[7]:
plt.plot(wav_model, comp_cont, label="Companion")
plt.plot(wav_model, host_cont-0.3, label="Host")
plt.title("Continuum Normalized (with -0.3 offset)")
plt.xlabel("Wavelength (nm)")
plt.legend()
plt.show()
# In[8]:
plt.plot(wav_model[20:200], comp_cont[20:200], label="Companion")
plt.plot(wav_model[20:200], host_cont[20:200], label="Host")
plt.title("Continuum Normalized - close up")
plt.xlabel("Wavelength (nm)")
ax = plt.gca()
ax.get_xaxis().get_major_formatter().set_useOffset(False)
plt.legend()
plt.show()
# # Combining Spectra
# I then mix the models using a combination of the two spectra.
# In this case with NO RV shifts.
# In[9]:
def mix(h, c, alpha):
return (h + c * alpha) / (1 + alpha)
mix1 = mix(host_cont, comp_cont, 0.01) # 1% of the companion spectra
mix2 = mix(host_cont, comp_cont, 0.05) # 5% of the companion spectra
# plt.plot(wav_model[20:100], comp_cont[20:100], label="comp")
plt.plot(wav_model[20:100], host_cont[20:100], label="host")
plt.plot(wav_model[20:100], mix1[20:100], label="mix 1%")
plt.plot(wav_model[20:100], mix2[20:100], label="mix 5%")
plt.xlabel("Wavelength (nm)")
plt.legend()
plt.show()
# The companion is cooler, so there are many more deep lines present in its spectrum.
# Even a small contribution of the companion spectrum reduces the continuum of the mixed spectra considerably.
#
# When I compare these mixed spectra to my observations
# In[10]:
mask = (wav_model > np.min(obs["wavelength"])) & (wav_model < np.max(obs["wavelength"]))
plt.plot(wav_model[mask], mix1[mask], label="mix 1%")
plt.plot(wav_model[mask], mix2[mask], label="mix 5%")
plt.plot(obs["wavelength"], obs["flux"], label="obs")
#plt.xlabel("Wavelength (nm)")
plt.legend()
plt.show()
# In[11]:
# Zoomed in
plt.plot(wav_model[mask], mix2[mask], label="mix 5%")
plt.plot(wav_model[mask], mix1[mask], label="mix 1%")
plt.plot(obs["wavelength"], obs["flux"], label="obs")
plt.xlabel("Wavelength (nm)")
plt.legend()
plt.xlim([2112, 2117])
plt.ylim([0.9, 1.1])
plt.title("Zoomed")
plt.show()
# As you can see here, my observations are above the continuum most of the time.
# What I have noticed is that this drastically affects the chi-squared result, as the best-fitting mixed model is then the one with the least amount of alpha.
#
# I am thinking of renormalizing my observations by implementing equation (1) from [Passegger 2016](https://arxiv.org/pdf/1601.01877.pdf) *(Fundamental M-dwarf parameters from high-resolution spectra using PHOENIX ACES models)*
#
# F_obs = F_obs * (continuum_fit model / continuum_fit observation)
#
# They fit a linear function to the continuum of the observation and computed spectra to account for *"slight differences in the continuum level and possible linear trends between the already normalized spectra."*
#
# - One difference is that they say they normalize the **average** flux of the spectra to unity. Would this make a difference in this method? (A quick sketch of this alternative is included after the renormalization cell below.)
#
#
# ## Questions
# - Would this be the correct approach to take to solve this?
# - Should I renomalize the observations first as well?
# - Am I treating the cooler M-dwarf spectra correctly in this approach?
#
# Attempting the Passegger method
# In[12]:
from scipy.interpolate import interp1d
# mix1_norm = continuum(wav_model, mix1, splits=50, method="linear", plot=False)
# mix2_norm = local_normalization(wav_model, mix2, splits=50, method="linear", plot=False)
obs_continuum = continuum(obs["wavelength"], obs["flux"], splits=20, method="linear", plot=True)
linear1 = continuum(wav_model, mix1, splits=50, method="linear", plot=True)
linear2 = continuum(wav_model, mix2, splits=50, method="linear", plot=False)
obs_renorm1 = obs["flux"] * (interp1d(wav_model, linear1)(obs["wavelength"]) / obs_continuum)
obs_renorm2 = obs["flux"] * (interp1d(wav_model, linear2)(obs["wavelength"]) / obs_continuum)
# In[13]:
# Just a scalar
# mix1_norm = local_normalization(wav_model, mix1, splits=50, method="scalar", plot=False)
# mix2_norm = local_normalization(wav_model, mix2, splits=50, method="scalar", plot=False)
obs_scalar = continuum(obs["wavelength"], obs["flux"], splits=20, method="scalar", plot=False)
scalar1 = continuum(wav_model, mix1, splits=50, method="scalar", plot=True)
scalar2 = continuum(wav_model, mix2, splits=50, method="scalar", plot=False)
print(scalar2)
obs_renorm_scalar1 = obs["flux"] * (interp1d(wav_model, scalar1)(obs["wavelength"]) / obs_scalar)
obs_renorm_scalar2 = obs["flux"] * (interp1d(wav_model, scalar2)(obs["wavelength"]) / obs_scalar)
# In[14]:
plt.plot(obs["wavelength"], obs_scalar, label="scalar observed")
plt.plot(obs["wavelength"], obs_continuum, label="linear observed")
plt.plot(obs["wavelength"], interp1d(wav_model, scalar1)(obs["wavelength"]), label="scalar 1%")
plt.plot(obs["wavelength"], interp1d(wav_model, linear1)(obs["wavelength"]), label="linear 1%")
plt.plot(obs["wavelength"], interp1d(wav_model, scalar2)(obs["wavelength"]), label="scalar 5%")
plt.plot(obs["wavelength"], interp1d(wav_model, linear2)(obs["wavelength"]), label="linear 5%")
plt.title("Linear and Scalar continuum renormalizations.")
plt.legend()
plt.show()
# In[18]:
plt.plot(obs["wavelength"], obs["flux"], label="obs", alpha =0.6)
plt.plot(obs["wavelength"], obs_renorm1, label="linear norm")
plt.plot(obs["wavelength"], obs_renorm_scalar1, label="scalar norm")
plt.plot(wav_model[mask], mix1[mask], label="mix 1%")
plt.legend()
plt.title("1% model")
plt.hlines(1, 2111, 2124, linestyle="--", alpha=0.2)
plt.show()
plt.plot(obs["wavelength"], obs["flux"], label="obs", alpha =0.6)
plt.plot(obs["wavelength"], obs_renorm1, label="linear norm")
plt.plot(obs["wavelength"], obs_renorm_scalar1, label="scalar norm")
plt.plot(wav_model[mask], mix1[mask], label="mix 1%")
plt.legend()
plt.title("1% model, zoom")
plt.xlim([2120, 2122])
plt.hlines(1, 2111, 2124, linestyle="--", alpha=0.2)
plt.show()
# In[16]:
plt.plot(obs["wavelength"], obs["flux"], label="obs", alpha =0.6)
plt.plot(obs["wavelength"], obs_renorm2, label="linear norm")
plt.plot(obs["wavelength"], obs_renorm_scalar2, label="scalar norm")
plt.plot(wav_model[mask], mix2[mask], label="mix 5%")
plt.legend()
plt.title("5% model")
plt.hlines(1, 2111, 2124, linestyle="--", alpha=0.2)
plt.show()
# In[17]:
plt.plot(obs["wavelength"], obs["flux"], label="obs", alpha =0.6)
plt.plot(obs["wavelength"], obs_renorm2, label="linear norm")
plt.plot(obs["wavelength"], obs_renorm_scalar2, label="scalar norm")
plt.plot(wav_model[mask], mix2[mask], label="mix 5%")
plt.legend()
plt.title("5% model zoomed")
plt.xlim([2120, 2122])
plt.hlines(1, 2111, 2124, linestyle="--", alpha=0.2)
plt.show()
# In this example, for the 5% companion spectrum, there is a bit of difference between the linear and scalar normalizations, with a larger difference at the longer wavelengths (more orange visible above the red). The faint blue is the spectrum before the renormalization.
# In[ ]:
# # Range of phoenix spectra
#
#
# In[19]:
wav_model = fits.getdata("/home/jneal/Phd/data/PHOENIX-ALL/PHOENIX/WAVE_PHOENIX-ACES-AGSS-COND-2011.fits")
wav_model /= 10 # nm
temps = [2300, 3000, 4000, 5000]
mask1 = (1000 < wav_model) & (wav_model < 3300)
masked_wav1 = wav_model[mask1]
for temp in temps[::-1]:
file = "/home/jneal/Phd/data/PHOENIX-ALL/PHOENIX/Z-0.0/lte0{0}-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits".format(temp)
host_f = fits.getdata(file)
plt.plot(masked_wav1, host_f[mask1], label="Teff={}".format(temp))
plt.title("Phoenix spectra")
plt.xlabel("Wavelength (nm)")
plt.legend()
plt.show()
# In[20]:
mask = (2000 < wav_model) & (wav_model < 2300)
masked_wav = wav_model[mask]
for temp in temps[::-1]:
file = "/home/jneal/Phd/data/PHOENIX-ALL/PHOENIX/Z-0.0/lte0{0}-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits".format(temp)
host_f = fits.getdata(file)
host_f = host_f[mask]
plt.plot(masked_wav, host_f, label="Teff={}".format(temp))
plt.title("Phoenix spectra")
plt.xlabel("Wavelength (nm)")
plt.legend()
plt.show()
# In[21]:
# Observations
for chip in range(1,5):
obs = fits.getdata("/home/jneal/.handy_spectra/HD211847-1-mixavg-tellcorr_{}.fits".format(chip))
plt.plot(obs["wavelength"], obs["flux"], label="chip {}".format(chip))
plt.hlines(1, 2111, 2165, linestyle="--")
plt.title("CRIRES spectrum HD211847")
plt.xlabel("Wavelength (nm)")
plt.legend()
plt.show()
# In[22]:
# Observations
for chip in range(1,5):
obs = fits.getdata("/home/jneal/.handy_spectra/HD30501-1-mixavg-tellcorr_{}.fits".format(chip))
plt.plot(obs["wavelength"], obs["flux"], label="chip {}".format(chip))
plt.hlines(1, 2111, 2165, linestyle="--")
plt.title("CRIRES spectrum HD30501")
plt.xlabel("Wavelength (nm)")
plt.legend()
plt.show()
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
mit
|
ViDA-NYU/domain_discovery_API
|
models/RadvizModel.py
|
1
|
3447
|
from radviz import Radviz
import numpy as np
from collections import OrderedDict
import json
from datetime import datetime  # used below when parsing the session 'fromDate'/'toDate' strings
from sklearn import linear_model
from online_classifier.tf_vector import tf_vectorizer
from domain_discovery_model import DomainModel
from elastic.config import es, es_doc_type, es_server
from elastic.get_config import get_available_domains, get_model_tags
from fetch_data import fetch_data
#import urllib2
#from bs4 import BeautifulSoup
class RadvizModel(DomainModel):
radviz = None
def __init__(self, path):
self._path = path
self._es = es
self._domains = get_available_domains(self._es)
self._mapping = {"url":"url", "timestamp":"retrieved", "text":"text", "html":"html", "tag":"tag", "query":"query", "domain":"domain", "title":"title"}
#super(RadvizModel, self).__init__(path)
def _esInfo(self, domainId):
self._domains = get_available_domains(self._es)
es_info = {
"activeDomainIndex": self._domains[domainId]['index'],
"docType": self._domains[domainId]['doc_type']
}
if not self._domains[domainId].get("mapping") is None:
es_info["mapping"] = self._domains[domainId]["mapping"]
else:
es_info["mapping"] = self._mapping
return es_info
def getRadvizPoints(self, session, filterByTerm):
es_info = self._esInfo(session['domainId'])
index = es_info['activeDomainIndex']
max_features = 200
#session['pagesCap'] = 12
if session.get('from') is None:
session['from'] = 0
format = '%m/%d/%Y %H:%M %Z'
if not session.get('fromDate') is None:
session['fromDate'] = long(DomainModel.convert_to_epoch(datetime.strptime(session['fromDate'], format)))
if not session.get('toDate') is None:
session['toDate'] = long(DomainModel.convert_to_epoch(datetime.strptime(session['toDate'], format)))
results_data = self.getTextQuery(session)
ddteval_data = fetch_data(results_data["results"], es_doc_type=es_doc_type, es=es)
data = ddteval_data["data"]
labels = ddteval_data["labels"]
urls = ddteval_data["urls"]
tf_v = tf_vectorizer(convert_to_ascii=True, max_features=max_features)
[X, features] = tf_v.vectorize(data)
matrix_transpose = np.transpose(X.todense())
print "\n\n Number of 1-gram features = ", len(features)
print "\n\n tf 1-gram matrix size = ", np.shape(X)
# data = self.radviz.loadData_pkl("data/ht_data_200.pkl").todense()
# data = np.transpose(data)
# features = self.radviz.loadFeatures("data/ht_data_features_200.csv")
# print features
# print len(features)
# labels = self.radviz.loadLabels("data/ht_data_labels_200.csv")
# urls = self.radviz.loadSampleNames("data/ht_data_urls_200.csv")
self.radviz = Radviz(X, features, labels, urls)
return_obj = {}
for i in range(0, len(features)):
return_obj[features[i]] = matrix_transpose[i,:].tolist()[0]
labels_urls = OrderedDict([("labels",labels), ("urls",urls), ("title", ddteval_data["title"]),("snippet",ddteval_data["snippet"]),("image_url",ddteval_data["image_url"])])
od = OrderedDict(list(OrderedDict(sorted(return_obj.items())).items()) + list(labels_urls.items()))
return od
def computeTSP(self):
return self.radviz.compute_tsp()
|
gpl-3.0
|
upliftaero/MAVProxy
|
MAVProxy/mavproxy.py
|
2
|
39686
|
#!/usr/bin/env python
'''
mavproxy - a MAVLink proxy program
Copyright Andrew Tridgell 2011
Released under the GNU GPL version 3 or later
'''
import sys, os, time, socket, signal
import fnmatch, errno, threading
import serial, Queue, select
import traceback
import select
import shlex
from MAVProxy.modules.lib import textconsole
from MAVProxy.modules.lib import rline
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import dumpstacks
# adding all this allows pyinstaller to build a working windows executable
# note that using --hidden-import does not work for these modules
try:
from multiprocessing import freeze_support
from pymavlink import mavwp, mavutil
import matplotlib, HTMLParser
try:
import readline
except ImportError:
import pyreadline as readline
except Exception:
pass
if __name__ == '__main__':
freeze_support()
class MPStatus(object):
'''hold status information about the mavproxy'''
def __init__(self):
self.gps = None
self.msgs = {}
self.msg_count = {}
self.counters = {'MasterIn' : [], 'MasterOut' : 0, 'FGearIn' : 0, 'FGearOut' : 0, 'Slave' : 0}
self.setup_mode = opts.setup
self.mav_error = 0
self.altitude = 0
self.last_altitude_announce = 0.0
self.last_distance_announce = 0.0
self.exit = False
self.flightmode = 'MAV'
self.last_mode_announce = 0
self.logdir = None
self.last_heartbeat = 0
self.last_message = 0
self.heartbeat_error = False
self.last_apm_msg = None
self.last_apm_msg_time = 0
self.highest_msec = 0
self.have_gps_lock = False
self.lost_gps_lock = False
self.last_gps_lock = 0
self.watch = None
self.last_streamrate1 = -1
self.last_streamrate2 = -1
self.last_seq = 0
self.armed = False
def show(self, f, pattern=None):
'''write status to status.txt'''
if pattern is None:
f.write('Counters: ')
for c in self.counters:
f.write('%s:%s ' % (c, self.counters[c]))
f.write('\n')
f.write('MAV Errors: %u\n' % self.mav_error)
f.write(str(self.gps)+'\n')
for m in sorted(self.msgs.keys()):
if pattern is not None and not fnmatch.fnmatch(str(m).upper(), pattern.upper()):
continue
f.write("%u: %s\n" % (self.msg_count[m], str(self.msgs[m])))
def write(self):
'''write status to status.txt'''
f = open('status.txt', mode='w')
self.show(f)
f.close()
def say_text(text, priority='important'):
'''text output - default function for say()'''
mpstate.console.writeln(text)
def say(text, priority='important'):
'''text and/or speech output'''
mpstate.functions.say(text, priority)
def add_input(cmd, immediate=False):
'''add some command input to be processed'''
if immediate:
process_stdin(cmd)
else:
mpstate.input_queue.put(cmd)
class MAVFunctions(object):
'''core functions available in modules'''
def __init__(self):
self.process_stdin = add_input
self.param_set = param_set
self.get_mav_param = get_mav_param
self.say = say_text
# input handler can be overridden by a module
self.input_handler = None
class MPState(object):
'''holds state of mavproxy'''
def __init__(self):
self.console = textconsole.SimpleConsole()
self.map = None
self.map_functions = {}
self.vehicle_type = None
self.vehicle_name = None
from MAVProxy.modules.lib.mp_settings import MPSettings, MPSetting
self.settings = MPSettings(
[ MPSetting('link', int, 1, 'Primary Link', tab='Link', range=(0,4), increment=1),
MPSetting('streamrate', int, 4, 'Stream rate link1', range=(-1,20), increment=1),
MPSetting('streamrate2', int, 4, 'Stream rate link2', range=(-1,20), increment=1),
MPSetting('heartbeat', int, 1, 'Heartbeat rate', range=(0,5), increment=1),
MPSetting('mavfwd', bool, True, 'Allow forwarded control'),
MPSetting('mavfwd_rate', bool, False, 'Allow forwarded rate control'),
MPSetting('shownoise', bool, True, 'Show non-MAVLink data'),
MPSetting('baudrate', int, opts.baudrate, 'baudrate for new links', range=(0,10000000), increment=1),
MPSetting('rtscts', bool, opts.rtscts, 'enable flow control'),
MPSetting('select_timeout', float, 0.01, 'select timeout'),
MPSetting('altreadout', int, 10, 'Altitude Readout',
range=(0,100), increment=1, tab='Announcements'),
MPSetting('distreadout', int, 200, 'Distance Readout', range=(0,10000), increment=1),
MPSetting('moddebug', int, opts.moddebug, 'Module Debug Level', range=(0,3), increment=1, tab='Debug'),
MPSetting('compdebug', int, 0, 'Computation Debug Mask', range=(0,3), tab='Debug'),
MPSetting('flushlogs', bool, False, 'Flush logs on every packet'),
MPSetting('requireexit', bool, False, 'Require exit command'),
MPSetting('wpupdates', bool, True, 'Announce waypoint updates'),
MPSetting('basealt', int, 0, 'Base Altitude', range=(0,30000), increment=1, tab='Altitude'),
MPSetting('wpalt', int, 100, 'Default WP Altitude', range=(0,10000), increment=1),
MPSetting('rallyalt', int, 90, 'Default Rally Altitude', range=(0,10000), increment=1),
MPSetting('terrainalt', str, 'Auto', 'Use terrain altitudes', choice=['Auto','True','False']),
MPSetting('rally_breakalt', int, 40, 'Default Rally Break Altitude', range=(0,10000), increment=1),
MPSetting('rally_flags', int, 0, 'Default Rally Flags', range=(0,10000), increment=1),
MPSetting('source_system', int, 255, 'MAVLink Source system', range=(0,255), increment=1, tab='MAVLink'),
MPSetting('source_component', int, 0, 'MAVLink Source component', range=(0,255), increment=1),
MPSetting('target_system', int, 0, 'MAVLink target system', range=(0,255), increment=1),
MPSetting('target_component', int, 0, 'MAVLink target component', range=(0,255), increment=1),
MPSetting('state_basedir', str, None, 'base directory for logs and aircraft directories')
])
self.completions = {
"script" : ["(FILENAME)"],
"set" : ["(SETTING)"],
"status" : ["(VARIABLE)"],
"module" : ["list",
"load (AVAILMODULES)",
"<unload|reload> (LOADEDMODULES)"]
}
self.status = MPStatus()
# master mavlink device
self.mav_master = None
# mavlink outputs
self.mav_outputs = []
self.sysid_outputs = {}
# SITL output
self.sitl_output = None
self.mav_param = mavparm.MAVParmDict()
self.modules = []
self.public_modules = {}
self.functions = MAVFunctions()
self.select_extra = {}
self.continue_mode = False
self.aliases = {}
import platform
self.system = platform.system()
def module(self, name):
'''Find a public module (most modules are private)'''
if name in self.public_modules:
return self.public_modules[name]
return None
def master(self):
'''return the currently chosen mavlink master object'''
if len(self.mav_master) == 0:
return None
if self.settings.link > len(self.mav_master):
self.settings.link = 1
# try to use one with no link error
if not self.mav_master[self.settings.link-1].linkerror:
return self.mav_master[self.settings.link-1]
for m in self.mav_master:
if not m.linkerror:
return m
return self.mav_master[self.settings.link-1]
def get_mav_param(param, default=None):
'''return a EEPROM parameter value'''
return mpstate.mav_param.get(param, default)
def param_set(name, value, retries=3):
'''set a parameter'''
name = name.upper()
return mpstate.mav_param.mavset(mpstate.master(), name, value, retries=retries)
def cmd_script(args):
'''run a script'''
if len(args) < 1:
print("usage: script <filename>")
return
run_script(args[0])
def cmd_set(args):
'''control mavproxy options'''
mpstate.settings.command(args)
def cmd_status(args):
'''show status'''
if len(args) == 0:
mpstate.status.show(sys.stdout, pattern=None)
else:
for pattern in args:
mpstate.status.show(sys.stdout, pattern=pattern)
def cmd_setup(args):
mpstate.status.setup_mode = True
mpstate.rl.set_prompt("")
def cmd_reset(args):
print("Resetting master")
mpstate.master().reset()
def cmd_watch(args):
'''watch a mavlink packet pattern'''
if len(args) == 0:
mpstate.status.watch = None
return
mpstate.status.watch = args[0]
print("Watching %s" % mpstate.status.watch)
def load_module(modname, quiet=False):
'''load a module'''
modpaths = ['MAVProxy.modules.mavproxy_%s' % modname, modname]
for (m,pm) in mpstate.modules:
if m.name == modname:
if not quiet:
print("module %s already loaded" % modname)
return False
for modpath in modpaths:
try:
m = import_package(modpath)
reload(m)
module = m.init(mpstate)
if isinstance(module, mp_module.MPModule):
mpstate.modules.append((module, m))
if not quiet:
print("Loaded module %s" % (modname,))
return True
else:
ex = "%s.init did not return a MPModule instance" % modname
break
except ImportError as msg:
ex = msg
if mpstate.settings.moddebug > 1:
import traceback
print(traceback.format_exc())
print("Failed to load module: %s. Use 'set moddebug 3' in the MAVProxy console to enable traceback" % ex)
return False
def unload_module(modname):
'''unload a module'''
for (m,pm) in mpstate.modules:
if m.name == modname:
if hasattr(m, 'unload'):
m.unload()
mpstate.modules.remove((m,pm))
print("Unloaded module %s" % modname)
return True
print("Unable to find module %s" % modname)
return False
def cmd_module(args):
'''module commands'''
usage = "usage: module <list|load|reload|unload>"
if len(args) < 1:
print(usage)
return
if args[0] == "list":
for (m,pm) in mpstate.modules:
print("%s: %s" % (m.name, m.description))
elif args[0] == "load":
if len(args) < 2:
print("usage: module load <name>")
return
load_module(args[1])
elif args[0] == "reload":
if len(args) < 2:
print("usage: module reload <name>")
return
modname = args[1]
pmodule = None
for (m,pm) in mpstate.modules:
if m.name == modname:
pmodule = pm
if pmodule is None:
print("Module %s not loaded" % modname)
return
if unload_module(modname):
import zipimport
try:
reload(pmodule)
except ImportError:
clear_zipimport_cache()
reload(pmodule)
if load_module(modname, quiet=True):
print("Reloaded module %s" % modname)
elif args[0] == "unload":
if len(args) < 2:
print("usage: module unload <name>")
return
modname = os.path.basename(args[1])
unload_module(modname)
else:
print(usage)
def cmd_alias(args):
'''alias commands'''
usage = "usage: alias <add|remove|list>"
if len(args) < 1 or args[0] == "list":
if len(args) >= 2:
wildcard = args[1].upper()
else:
wildcard = '*'
for a in sorted(mpstate.aliases.keys()):
if fnmatch.fnmatch(a.upper(), wildcard):
print("%-15s : %s" % (a, mpstate.aliases[a]))
elif args[0] == "add":
if len(args) < 3:
print(usage)
return
a = args[1]
mpstate.aliases[a] = ' '.join(args[2:])
elif args[0] == "remove":
if len(args) != 2:
print(usage)
return
a = args[1]
if a in mpstate.aliases:
mpstate.aliases.pop(a)
else:
print("no alias %s" % a)
else:
print(usage)
return
def clear_zipimport_cache():
"""Clear out cached entries from _zip_directory_cache.
See http://www.digi.com/wiki/developer/index.php/Error_messages"""
import sys, zipimport
syspath_backup = list(sys.path)
zipimport._zip_directory_cache.clear()
# load back items onto sys.path
sys.path = syspath_backup
# add this too: see https://mail.python.org/pipermail/python-list/2005-May/353229.html
sys.path_importer_cache.clear()
# http://stackoverflow.com/questions/211100/pythons-import-doesnt-work-as-expected
# has info on why this is necessary.
def import_package(name):
"""Given a package name like 'foo.bar.quux', imports the package
and returns the desired module."""
import zipimport
try:
mod = __import__(name)
except ImportError:
clear_zipimport_cache()
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
command_map = {
'script' : (cmd_script, 'run a script of MAVProxy commands'),
'setup' : (cmd_setup, 'go into setup mode'),
'reset' : (cmd_reset, 'reopen the connection to the MAVLink master'),
'status' : (cmd_status, 'show status'),
'set' : (cmd_set, 'mavproxy settings'),
'watch' : (cmd_watch, 'watch a MAVLink pattern'),
'module' : (cmd_module, 'module commands'),
'alias' : (cmd_alias, 'command aliases')
}
def process_stdin(line):
'''handle commands from user'''
if line is None:
sys.exit(0)
# allow for modules to override input handling
if mpstate.functions.input_handler is not None:
mpstate.functions.input_handler(line)
return
line = line.strip()
if mpstate.status.setup_mode:
# in setup mode we send strings straight to the master
if line == '.':
mpstate.status.setup_mode = False
mpstate.status.flightmode = "MAV"
mpstate.rl.set_prompt("MAV> ")
return
if line != '+++':
line += '\r'
for c in line:
time.sleep(0.01)
mpstate.master().write(c)
return
if not line:
return
args = shlex.split(line)
cmd = args[0]
while cmd in mpstate.aliases:
line = mpstate.aliases[cmd]
args = shlex.split(line) + args[1:]
cmd = args[0]
if cmd == 'help':
k = command_map.keys()
k.sort()
for cmd in k:
(fn, help) = command_map[cmd]
print("%-15s : %s" % (cmd, help))
return
if cmd == 'exit' and mpstate.settings.requireexit:
mpstate.status.exit = True
return
if not cmd in command_map:
for (m,pm) in mpstate.modules:
if hasattr(m, 'unknown_command'):
try:
if m.unknown_command(args):
return
except Exception as e:
print("ERROR in command: %s" % str(e))
print("Unknown command '%s'" % line)
return
(fn, help) = command_map[cmd]
try:
fn(args[1:])
except Exception as e:
print("ERROR in command %s: %s" % (args[1:], str(e)))
if mpstate.settings.moddebug > 1:
traceback.print_exc()
def process_master(m):
'''process packets from the MAVLink master'''
try:
s = m.recv(16*1024)
except Exception:
time.sleep(0.1)
return
# prevent a dead serial port from causing the CPU to spin. The user hitting enter will
# cause it to try and reconnect
if len(s) == 0:
time.sleep(0.1)
return
if (mpstate.settings.compdebug & 1) != 0:
return
if mpstate.logqueue_raw:
mpstate.logqueue_raw.put(str(s))
if mpstate.status.setup_mode:
if mpstate.system == 'Windows':
# strip nsh ansi codes
s = s.replace("\033[K","")
sys.stdout.write(str(s))
sys.stdout.flush()
return
if m.first_byte and opts.auto_protocol:
m.auto_mavlink_version(s)
msgs = m.mav.parse_buffer(s)
if msgs:
for msg in msgs:
sysid = msg.get_srcSystem()
if sysid in mpstate.sysid_outputs:
# the message has been handled by a specialised handler for this system
continue
if getattr(m, '_timestamp', None) is None:
m.post_message(msg)
if msg.get_type() == "BAD_DATA":
if opts.show_errors:
mpstate.console.writeln("MAV error: %s" % msg)
mpstate.status.mav_error += 1
def process_mavlink(slave):
'''process packets from MAVLink slaves, forwarding to the master'''
try:
buf = slave.recv()
except socket.error:
return
try:
if slave.first_byte and opts.auto_protocol:
slave.auto_mavlink_version(buf)
msgs = slave.mav.parse_buffer(buf)
except mavutil.mavlink.MAVError as e:
mpstate.console.error("Bad MAVLink slave message from %s: %s" % (slave.address, e.message))
return
if msgs is None:
return
if mpstate.settings.mavfwd and not mpstate.status.setup_mode:
for m in msgs:
if mpstate.status.watch is not None:
if fnmatch.fnmatch(m.get_type().upper(), mpstate.status.watch.upper()):
mpstate.console.writeln('> '+ str(m))
mpstate.master().write(m.get_msgbuf())
mpstate.status.counters['Slave'] += 1
def mkdir_p(dir):
'''like mkdir -p'''
if not dir:
return
if dir.endswith("/"):
mkdir_p(dir[:-1])
return
if os.path.isdir(dir):
return
mkdir_p(os.path.dirname(dir))
os.mkdir(dir)
def log_writer():
'''log writing thread'''
while True:
mpstate.logfile_raw.write(mpstate.logqueue_raw.get())
while not mpstate.logqueue_raw.empty():
mpstate.logfile_raw.write(mpstate.logqueue_raw.get())
while not mpstate.logqueue.empty():
mpstate.logfile.write(mpstate.logqueue.get())
if mpstate.settings.flushlogs:
mpstate.logfile.flush()
mpstate.logfile_raw.flush()
# If state_basedir is NOT set then paths for logs and aircraft
# directories are relative to mavproxy's cwd
def log_paths():
'''Returns tuple (logdir, telemetry_log_filepath, raw_telemetry_log_filepath)'''
if opts.aircraft is not None:
if opts.mission is not None:
print(opts.mission)
dirname = "%s/logs/%s/Mission%s" % (opts.aircraft, time.strftime("%Y-%m-%d"), opts.mission)
else:
dirname = "%s/logs/%s" % (opts.aircraft, time.strftime("%Y-%m-%d"))
# dirname is currently relative. Possibly add state_basedir:
if mpstate.settings.state_basedir is not None:
dirname = os.path.join(mpstate.settings.state_basedir,dirname)
mkdir_p(dirname)
highest = None
for i in range(1, 10000):
fdir = os.path.join(dirname, 'flight%u' % i)
if not os.path.exists(fdir):
break
highest = fdir
if mpstate.continue_mode and highest is not None:
fdir = highest
elif os.path.exists(fdir):
print("Flight logs full")
sys.exit(1)
logname = 'flight.tlog'
logdir = fdir
else:
logname = os.path.basename(opts.logfile)
dir_path = os.path.dirname(opts.logfile)
if not os.path.isabs(dir_path) and mpstate.settings.state_basedir is not None:
dir_path = os.path.join(mpstate.settings.state_basedir,dir_path)
logdir = dir_path
mkdir_p(logdir)
return (logdir,
os.path.join(logdir, logname),
os.path.join(logdir, logname + '.raw'))
def open_telemetry_logs(logpath_telem, logpath_telem_raw):
'''open log files'''
if opts.append_log or opts.continue_mode:
mode = 'a'
else:
mode = 'w'
mpstate.logfile = open(logpath_telem, mode=mode)
mpstate.logfile_raw = open(logpath_telem_raw, mode=mode)
print("Log Directory: %s" % mpstate.status.logdir)
print("Telemetry log: %s" % logpath_telem)
# use a separate thread for writing to the logfile to prevent
# delays during disk writes (important as delays can be long if camera
# app is running)
t = threading.Thread(target=log_writer, name='log_writer')
t.daemon = True
t.start()
def set_stream_rates():
'''set mavlink stream rates'''
if (not msg_period.trigger() and
mpstate.status.last_streamrate1 == mpstate.settings.streamrate and
mpstate.status.last_streamrate2 == mpstate.settings.streamrate2):
return
mpstate.status.last_streamrate1 = mpstate.settings.streamrate
mpstate.status.last_streamrate2 = mpstate.settings.streamrate2
for master in mpstate.mav_master:
if master.linknum == 0:
rate = mpstate.settings.streamrate
else:
rate = mpstate.settings.streamrate2
if rate != -1:
master.mav.request_data_stream_send(mpstate.settings.target_system, mpstate.settings.target_component,
mavutil.mavlink.MAV_DATA_STREAM_ALL,
rate, 1)
def check_link_status():
'''check status of master links'''
tnow = time.time()
if mpstate.status.last_message != 0 and tnow > mpstate.status.last_message + 5:
say("no link")
mpstate.status.heartbeat_error = True
for master in mpstate.mav_master:
if not master.linkerror and (tnow > master.last_message + 5 or master.portdead):
say("link %u down" % (master.linknum+1))
master.linkerror = True
def send_heartbeat(master):
if master.mavlink10():
master.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS, mavutil.mavlink.MAV_AUTOPILOT_INVALID,
0, 0, 0)
else:
MAV_GROUND = 5
MAV_AUTOPILOT_NONE = 4
master.mav.heartbeat_send(MAV_GROUND, MAV_AUTOPILOT_NONE)
def periodic_tasks():
'''run periodic checks'''
if mpstate.status.setup_mode:
return
if (mpstate.settings.compdebug & 2) != 0:
return
if mpstate.settings.heartbeat != 0:
heartbeat_period.frequency = mpstate.settings.heartbeat
if heartbeat_period.trigger() and mpstate.settings.heartbeat != 0:
mpstate.status.counters['MasterOut'] += 1
for master in mpstate.mav_master:
send_heartbeat(master)
if heartbeat_check_period.trigger():
check_link_status()
set_stream_rates()
# call optional module idle tasks. These are called at several hundred Hz
for (m,pm) in mpstate.modules:
if hasattr(m, 'idle_task'):
try:
m.idle_task()
except Exception as msg:
if mpstate.settings.moddebug == 1:
print(msg)
elif mpstate.settings.moddebug > 1:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
# also see if the module should be unloaded:
if m.needs_unloading:
unload_module(m.name)
def main_loop():
'''main processing loop'''
if not mpstate.status.setup_mode and not opts.nowait:
for master in mpstate.mav_master:
send_heartbeat(master)
if master.linknum == 0:
print("Waiting for heartbeat from %s" % master.address)
master.wait_heartbeat()
set_stream_rates()
while True:
if mpstate is None or mpstate.status.exit:
return
while not mpstate.input_queue.empty():
line = mpstate.input_queue.get()
mpstate.input_count += 1
cmds = line.split(';')
if len(cmds) == 1 and cmds[0] == "":
mpstate.empty_input_count += 1
for c in cmds:
process_stdin(c)
for master in mpstate.mav_master:
if master.fd is None:
if master.port.inWaiting() > 0:
process_master(master)
periodic_tasks()
rin = []
for master in mpstate.mav_master:
if master.fd is not None and not master.portdead:
rin.append(master.fd)
for m in mpstate.mav_outputs:
rin.append(m.fd)
for sysid in mpstate.sysid_outputs:
m = mpstate.sysid_outputs[sysid]
rin.append(m.fd)
if rin == []:
time.sleep(0.0001)
continue
for fd in mpstate.select_extra:
rin.append(fd)
try:
(rin, win, xin) = select.select(rin, [], [], mpstate.settings.select_timeout)
except select.error:
continue
if mpstate is None:
return
for fd in rin:
if mpstate is None:
return
for master in mpstate.mav_master:
if fd == master.fd:
process_master(master)
if mpstate is None:
return
continue
for m in mpstate.mav_outputs:
if fd == m.fd:
process_mavlink(m)
if mpstate is None:
return
continue
for sysid in mpstate.sysid_outputs:
m = mpstate.sysid_outputs[sysid]
if fd == m.fd:
process_mavlink(m)
if mpstate is None:
return
continue
# this allow modules to register their own file descriptors
# for the main select loop
if fd in mpstate.select_extra:
try:
# call the registered read function
(fn, args) = mpstate.select_extra[fd]
fn(args)
except Exception as msg:
if mpstate.settings.moddebug == 1:
print(msg)
# on an exception, remove it from the select list
mpstate.select_extra.pop(fd)
def input_loop():
'''wait for user input'''
while mpstate.status.exit != True:
try:
if mpstate.status.exit != True:
line = raw_input(mpstate.rl.prompt)
except EOFError:
mpstate.status.exit = True
sys.exit(1)
mpstate.input_queue.put(line)
def run_script(scriptfile):
'''run a script file'''
try:
f = open(scriptfile, mode='r')
except Exception:
return
mpstate.console.writeln("Running script %s" % scriptfile)
for line in f:
line = line.strip()
if line == "" or line.startswith('#'):
continue
if line.startswith('@'):
line = line[1:]
else:
mpstate.console.writeln("-> %s" % line)
process_stdin(line)
f.close()
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser("mavproxy.py [options]")
parser.add_option("--master", dest="master", action='append',
metavar="DEVICE[,BAUD]", help="MAVLink master port and optional baud rate",
default=[])
parser.add_option("--out", dest="output", action='append',
metavar="DEVICE[,BAUD]", help="MAVLink output port and optional baud rate",
default=[])
parser.add_option("--baudrate", dest="baudrate", type='int',
help="default serial baud rate", default=57600)
parser.add_option("--sitl", dest="sitl", default=None, help="SITL output port")
parser.add_option("--streamrate",dest="streamrate", default=4, type='int',
help="MAVLink stream rate")
parser.add_option("--source-system", dest='SOURCE_SYSTEM', type='int',
default=255, help='MAVLink source system for this GCS')
parser.add_option("--source-component", dest='SOURCE_COMPONENT', type='int',
default=0, help='MAVLink source component for this GCS')
parser.add_option("--target-system", dest='TARGET_SYSTEM', type='int',
default=0, help='MAVLink target master system')
parser.add_option("--target-component", dest='TARGET_COMPONENT', type='int',
default=0, help='MAVLink target master component')
parser.add_option("--logfile", dest="logfile", help="MAVLink master logfile",
default='mav.tlog')
parser.add_option("-a", "--append-log", dest="append_log", help="Append to log files",
action='store_true', default=False)
parser.add_option("--quadcopter", dest="quadcopter", help="use quadcopter controls",
action='store_true', default=False)
parser.add_option("--setup", dest="setup", help="start in setup mode",
action='store_true', default=False)
parser.add_option("--nodtr", dest="nodtr", help="disable DTR drop on close",
action='store_true', default=False)
parser.add_option("--show-errors", dest="show_errors", help="show MAVLink error packets",
action='store_true', default=False)
parser.add_option("--speech", dest="speech", help="use text to speach",
action='store_true', default=False)
parser.add_option("--aircraft", dest="aircraft", help="aircraft name", default=None)
parser.add_option("--cmd", dest="cmd", help="initial commands", default=None, action='append')
parser.add_option("--console", action='store_true', help="use GUI console")
parser.add_option("--map", action='store_true', help="load map module")
parser.add_option(
'--load-module',
action='append',
default=[],
help='Load the specified module. Can be used multiple times, or with a comma separated list')
parser.add_option("--mav09", action='store_true', default=False, help="Use MAVLink protocol 0.9")
parser.add_option("--auto-protocol", action='store_true', default=False, help="Auto detect MAVLink protocol version")
parser.add_option("--nowait", action='store_true', default=False, help="don't wait for HEARTBEAT on startup")
parser.add_option("-c", "--continue", dest='continue_mode', action='store_true', default=False, help="continue logs")
parser.add_option("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_option("--rtscts", action='store_true', help="enable hardware RTS/CTS flow control")
parser.add_option("--moddebug", type=int, help="module debug level", default=0)
parser.add_option("--mission", dest="mission", help="mission name", default=None)
parser.add_option("--daemon", action='store_true', help="run in daemon mode, do not start interactive shell")
parser.add_option("--profile", action='store_true', help="run the Yappi python profiler")
parser.add_option("--state-basedir", default=None, help="base directory for logs and aircraft directories")
parser.add_option("--version", action='store_true', help="version information")
parser.add_option("--default-modules", default="log,wp,rally,fence,param,relay,tuneopt,arm,mode,calibration,rc,auxopt,misc,cmdlong,battery,terrain,output", help='default module list')
(opts, args) = parser.parse_args()
# warn people about ModemManager which interferes badly with APM and Pixhawk
if os.path.exists("/usr/sbin/ModemManager"):
print("WARNING: You should uninstall ModemManager as it conflicts with APM and Pixhawk")
if opts.mav09:
os.environ['MAVLINK09'] = '1'
from pymavlink import mavutil, mavparm
mavutil.set_dialect(opts.dialect)
#version information
if opts.version:
import pkg_resources
version = pkg_resources.require("mavproxy")[0].version
print "MAVProxy is a modular ground station using the mavlink protocol"
print "MAVProxy Version: " + version
sys.exit(1)
# global mavproxy state
mpstate = MPState()
mpstate.status.exit = False
mpstate.command_map = command_map
mpstate.continue_mode = opts.continue_mode
# queues for logging
mpstate.logqueue = Queue.Queue()
mpstate.logqueue_raw = Queue.Queue()
if opts.speech:
# start the speech-dispatcher early, so it doesn't inherit any ports from
# modules/mavutil
load_module('speech')
if not opts.master:
serial_list = mavutil.auto_detect_serial(preferred_list=['*FTDI*',"*Arduino_Mega_2560*", "*3D_Robotics*", "*USB_to_UART*", '*PX4*', '*FMU*'])
print('Auto-detected serial ports are:')
for port in serial_list:
print("%s" % port)
# container for status information
mpstate.settings.target_system = opts.TARGET_SYSTEM
mpstate.settings.target_component = opts.TARGET_COMPONENT
mpstate.mav_master = []
mpstate.rl = rline.rline("MAV> ", mpstate)
def quit_handler(signum = None, frame = None):
#print 'Signal handler called with signal', signum
if mpstate.status.exit:
print 'Clean shutdown impossible, forcing an exit'
sys.exit(0)
else:
mpstate.status.exit = True
# Listen for kill signals to cleanly shutdown modules
fatalsignals = [signal.SIGTERM]
try:
fatalsignals.append(signal.SIGHUP)
fatalsignals.append(signal.SIGQUIT)
except Exception:
pass
if opts.daemon: # SIGINT breaks readline parsing - if we are interactive, just let things die
fatalsignals.append(signal.SIGINT)
for sig in fatalsignals:
signal.signal(sig, quit_handler)
load_module('link', quiet=True)
mpstate.settings.source_system = opts.SOURCE_SYSTEM
mpstate.settings.source_component = opts.SOURCE_COMPONENT
# open master link
for mdev in opts.master:
if not mpstate.module('link').link_add(mdev):
sys.exit(1)
if not opts.master and len(serial_list) == 1:
print("Connecting to %s" % serial_list[0])
mpstate.module('link').link_add(serial_list[0].device)
elif not opts.master:
wifi_device = '0.0.0.0:14550'
mpstate.module('link').link_add(wifi_device)
# open any mavlink output ports
for port in opts.output:
mpstate.mav_outputs.append(mavutil.mavlink_connection(port, baud=int(opts.baudrate), input=False))
if opts.sitl:
mpstate.sitl_output = mavutil.mavudp(opts.sitl, input=False)
mpstate.settings.streamrate = opts.streamrate
mpstate.settings.streamrate2 = opts.streamrate
if opts.state_basedir is not None:
mpstate.settings.state_basedir = opts.state_basedir
msg_period = mavutil.periodic_event(1.0/15)
heartbeat_period = mavutil.periodic_event(1)
heartbeat_check_period = mavutil.periodic_event(0.33)
mpstate.input_queue = Queue.Queue()
mpstate.input_count = 0
mpstate.empty_input_count = 0
if opts.setup:
mpstate.rl.set_prompt("")
# call this early so that logdir is setup based on --aircraft
(mpstate.status.logdir, logpath_telem, logpath_telem_raw) = log_paths()
if not opts.setup:
# some core functionality is in modules
standard_modules = opts.default_modules.split(',')
for m in standard_modules:
load_module(m, quiet=True)
if opts.console:
process_stdin('module load console')
if opts.map:
process_stdin('module load map')
for module in opts.load_module:
modlist = module.split(',')
for mod in modlist:
process_stdin('module load %s' % mod)
if 'HOME' in os.environ and not opts.setup:
start_script = os.path.join(os.environ['HOME'], ".mavinit.scr")
if os.path.exists(start_script):
run_script(start_script)
if 'LOCALAPPDATA' in os.environ and not opts.setup:
start_script = os.path.join(os.environ['LOCALAPPDATA'], "MAVProxy", "mavinit.scr")
if os.path.exists(start_script):
run_script(start_script)
if opts.aircraft is not None:
start_script = os.path.join(opts.aircraft, "mavinit.scr")
if os.path.exists(start_script):
run_script(start_script)
else:
print("no script %s" % start_script)
if opts.cmd is not None:
for cstr in opts.cmd:
cmds = cstr.split(';')
for c in cmds:
process_stdin(c)
if opts.profile:
import yappi # We do the import here so that we won't barf if run normally and yappi not available
yappi.start()
# log all packets from the master, for later replay
open_telemetry_logs(logpath_telem, logpath_telem_raw)
# run main loop as a thread
mpstate.status.thread = threading.Thread(target=main_loop, name='main_loop')
mpstate.status.thread.daemon = True
mpstate.status.thread.start()
# use main program for input. This ensures the terminal cleans
# up on exit
while (mpstate.status.exit != True):
try:
if opts.daemon:
time.sleep(0.1)
else:
input_loop()
except KeyboardInterrupt:
if mpstate.settings.requireexit:
print("Interrupt caught. Use 'exit' to quit MAVProxy.")
#Just lost the map and console, get them back:
for (m,pm) in mpstate.modules:
if m.name in ["map", "console"]:
if hasattr(m, 'unload'):
try:
m.unload()
except Exception:
pass
reload(m)
m.init(mpstate)
else:
mpstate.status.exit = True
sys.exit(1)
if opts.profile:
yappi.get_func_stats().print_all()
yappi.get_thread_stats().print_all()
#this loop executes after leaving the above loop and is for cleanup on exit
for (m,pm) in mpstate.modules:
if hasattr(m, 'unload'):
print("Unloading module %s" % m.name)
m.unload()
sys.exit(1)
|
gpl-3.0
|
nelango/ViralityAnalysis
|
model/lib/sklearn/decomposition/tests/test_dict_learning.py
|
67
|
9084
|
import numpy as np
from sklearn.utils import check_array
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_input():
n_components = 100
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
Xf = check_array(X, order='F')
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
a = sparse_encode(X, V, algorithm=algo)
b = sparse_encode(Xf, V, algorithm=algo)
assert_array_almost_equal(a, b)
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
|
mit
|
santis19/tesina-fisica
|
Flexion/modelo-sintetico/4-flex-y-lito-0.8/lib/double_window_selection.py
|
2
|
3825
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
class DoubleWindowSelection(object):
def __init__(self,x,y,ax1,ax2):
self.x = x
self.y = y
#~ self.z = z
self.dx = abs(self.x[0][0] - self.x[0][-1])/(np.shape(self.x)[1]-1)
self.dy = abs(self.y[0][0] - self.y[-1][0])/(np.shape(self.y)[0]-1)
assert self.dx == self.dy, "dx != dy"
self.rect1 = Rectangle((0,0), 1, 1,fc='None')
self.rect2 = Rectangle((0,0), 1, 1,fc='None')
self.x_center, self.y_center = None, None
self.half_width = (min(np.shape(self.x))/8)*self.dx
self.x1, self.x2 = None, None
self.y1, self.y2 = None, None
self.ax1 = ax1
self.ax2 = ax2
self.l1, = self.ax1.plot([self.x_center],[self.y_center],'o')
self.l2, = self.ax2.plot([self.x_center],[self.y_center],'o')
self.ax1.add_patch(self.rect1)
self.ax2.add_patch(self.rect2)
self.ax1.figure.canvas.mpl_connect('button_press_event', self.on_press)
self.ax1.figure.canvas.mpl_connect('scroll_event', self.on_scroll)
self.ax1.figure.canvas.mpl_connect('key_press_event', self.on_key)
print "\nWINDOWS INSTRUCTIONS:"
print "Click to select the window center"
print "Move the center with arrows or click again"
print "Resize the window with the mouse scroll or with '+' and '-'"
print "Press 'i' to show information about the window"
def on_press(self,event):
if event.inaxes == self.ax1 or event.inaxes == self.ax2:
if event.button == 1:
self.x_center, self.y_center = event.xdata, event.ydata
#~ self.x_center, self.y_center = nearest_point(self.x, self.y,
#~ self.x_center, self.y_center)
self.x_center -= self.x_center%self.dx
self.y_center -= self.y_center%self.dx
self.rectangle_construction()
else:
return
def on_scroll(self,event):
self.half_width += event.step * self.dx
self.rectangle_construction()
def on_key(self,event):
event_list = ["right","left","up","down"]
if event.key in event_list:
if event.key == "right":
self.x_center += self.dx
elif event.key == "left":
self.x_center -= self.dx
elif event.key == "up":
self.y_center += self.dx
elif event.key == "down":
self.y_center -= self.dx
self.rectangle_construction()
if event.key == "i":
print "(x,y)=",(self.x_center,self.y_center),"Width:",self.half_width*2
if event.key == "+" or event.key == "-":
if event.key == "+":
self.half_width += self.dx
elif event.key == "-":
self.half_width -= self.dx
self.rectangle_construction()
def rectangle_construction(self):
self.x1 = self.x_center - self.half_width
self.x2 = self.x_center + self.half_width
self.y1 = self.y_center - self.half_width
self.y2 = self.y_center + self.half_width
self.rect1.set_width(self.x2 - self.x1)
self.rect1.set_height(self.y2 - self.y1)
self.rect1.set_xy((self.x1, self.y1))
self.l1.set_xdata([self.x_center])
self.l1.set_ydata([self.y_center])
self.rect2.set_width(self.x2 - self.x1)
self.rect2.set_height(self.y2 - self.y1)
self.rect2.set_xy((self.x1, self.y1))
self.l2.set_xdata([self.x_center])
self.l2.set_ydata([self.y_center])
self.ax1.figure.canvas.draw()
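# Illustrative usage sketch (added comment, not part of the original module;
# assumes x and y come from np.meshgrid with equal spacing, as the assert above requires):
#
#     fig, (ax1, ax2) = plt.subplots(1, 2)
#     x, y = np.meshgrid(np.arange(0.0, 100.0, 1.0), np.arange(0.0, 80.0, 1.0))
#     ax1.pcolormesh(x, y, np.sin(x / 10.0) * np.cos(y / 10.0))
#     selector = DoubleWindowSelection(x, y, ax1, ax2)
#     plt.show()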
|
gpl-2.0
|
riddlezyc/geolab
|
src/energyforce/metad/contourplot.py
|
1
|
3539
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
# 0 for ca, 1 for mg
sim = 0
dirName = ''
if sim == 0:
dirName = r"F:\simulations\zyj\caco3-metad\metad/"
if sim == 1:
dirName = r"F:\simulations\zyj\mgco3-metad\metad/"
fileName = dirName + "fes.dat"
# because plt.contour needs x (1d), y (1d), and z (2d), it's better to load them into different lists
x = []
y = []
z = []
with open(fileName, 'r') as file:
for line in file:
line = line.strip()
if len(line) and line[0] != '#':
if float(line.split()[0]) not in x:
x.append(float(line.split()[0]))
if float(line.split()[1]) not in y:
y.append(float(line.split()[1]))
z.append(float(line.split()[2]))
z = np.array(z)
# convert z(1d) to a 2d matrix
z = z.reshape((len(x), len(y)))
# for the color map, one can "from matplotlib import cm" or create one
# two ways to create a colormap (cmap):
# 1, the from_list method (recommended)
# 2, pass a dict of r,g,b entries (trivial, but verbose)
color = [(0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 1)] # B-G-R-W
bgrw = LinearSegmentedColormap.from_list('my_bgrw', color, N=100)
# each r/g/b entry is (position, value below, value above), so the color can change discontinuously at a position
# the positions must cover the whole range, i.e., the first column goes from 0 to 1
# cdict1 = {'red': ((0.0, 0.0, 0.0),
# (0.25, 0.0, 0.0),
# (0.5, 0.0, 1.0),
# (0.75, 1.0, 1.0),
# (1.0, 0.0, 0.0)),
#
# 'green': ((0.0, 0.0, 0.0),
# (0.25, 0.0, 0.0),
# (0.5, 1.0, 1.0),
# (0.75, 0.0, 0.0),
# (1.0, 0.0, 0.0)),
#
# 'blue': ((0.0, 1.0, 1.0),
# (0.25, 0.0, 1.0),
# (0.5, 0.0, 0.0),
# (0.75, 0.0, 0.0),
# (1.0, 0.0, 0.0))
# }
# bgrw = LinearSegmentedColormap('my_bgrw', cdict1)
plt.figure(0, figsize=(8, 6))
figName = dirName + 'free_energy.png'
if sim == 0:
# plt.title('Free Energy', fontsize=15)
plt.xlabel('Ca-C distance (A)', fontsize=15)
plt.ylabel('Ca-Ow CN', fontsize=15)
plt.xlim(2, 10)
plt.ylim(4, 10)
    # here plt.text adds the colorbar legend: the arguments are the fontsize, then the vertical/horizontal
    # alignment and the rotation; transform=plt.gca().transAxes makes the position (1.2, 0.5) relative to the
    # axes range, whereas without transform data coordinates would be needed, e.g. 12 and (4+10)/2 in this case
plt.text(1.2, 0.5, 'Free Energy (kJ/mol)', fontsize=18, va='center', ha='center', rotation=90,
transform=plt.gca().transAxes)
if sim == 1:
# plt.title('Free Energy', fontsize=15)
plt.xlabel('Mg-C distance (A)', fontsize=15)
plt.ylabel('Mg-Ow CN', fontsize=15)
plt.xlim(2, 10)
plt.ylim(4.5, 8.)
plt.text(1.2, 0.5, 'Free Energy (kJ/mol)', fontsize=18, va='center', ha='center', rotation=90,
transform=plt.gca().transAxes)
# this is contour plot with lines
con = plt.contour(x, y, z, linewidths=0.5, colors='k', levels=np.linspace(0,60,17))
# this is contour plot with color filling
conf = plt.contourf(x, y, z, cmap=bgrw, levels=np.linspace(0, 50, 100))
# this is color bar
cbar = plt.colorbar(conf, ticks=np.linspace(0, 50, 6), format='%d')
plt.grid(True, color='k', linestyle='--', linewidth=0.5)
plt.savefig(figName, format='png', dpi=300)
plt.show()
|
gpl-3.0
|
rs2/pandas
|
pandas/tests/series/test_missing.py
|
1
|
27622
|
from datetime import datetime, timedelta
import numpy as np
import pytest
import pytz
from pandas._libs import iNaT
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
IntervalIndex,
NaT,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesMissingData:
def test_timedelta_fillna(self):
# GH 3371
s = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = s.diff()
# reg fillna
result = td.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
tm.assert_series_equal(result, expected)
# interpreted as seconds, deprecated
with pytest.raises(TypeError, match="Passing integers to fillna"):
td.fillna(1)
result = td.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
tm.assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
tm.assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
tm.assert_series_equal(result, expected)
result = td.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
tm.assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
tm.assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
s[2] = np.nan
# ffill
result = s.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = s.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeBlock
s = Series(
[
Timestamp("2011-01-01 10:00"),
pd.NaT,
Timestamp("2011-01-03 10:00"),
pd.NaT,
]
)
null_loc = pd.Series([False, True, False, True])
result = s.fillna(pd.Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(
{
1: pd.Timestamp("2011-01-02 10:00", tz=tz),
3: pd.Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(
{1: pd.Timestamp("2011-01-02 10:00"), 3: pd.Timestamp("2011-01-04 10:00")}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(
["2011-01-01 10:00", pd.NaT, "2011-01-03 10:00", pd.NaT], tz=tz
)
s = pd.Series(idx)
assert s.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp("2011-01-02 10:00", tz=tz))
idx = pd.DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
"AAA",
Timestamp("2011-01-03 10:00", tz=tz),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(
{
1: pd.Timestamp("2011-01-02 10:00", tz=tz),
3: pd.Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(
{
1: pd.Timestamp("2011-01-02 10:00", tz=tz),
3: pd.Timestamp("2011-01-04 10:00", tz=tz),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp("20130101"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(Timestamp("20130101", tz="US/Pacific"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
def test_fillna_dt64tz_with_method(self):
# with timezone
# GH 15855
ser = pd.Series([pd.Timestamp("2012-11-11 00:00:00+01:00"), pd.NaT])
exp = pd.Series(
[
pd.Timestamp("2012-11-11 00:00:00+01:00"),
pd.Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="pad"), exp)
ser = pd.Series([pd.NaT, pd.Timestamp("2012-11-11 00:00:00+01:00")])
exp = pd.Series(
[
pd.Timestamp("2012-11-11 00:00:00+01:00"),
pd.Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="bfill"), exp)
def test_fillna_consistency(self):
# GH 16402
# fillna with a tz aware to a tz-naive, should result in object
s = Series([Timestamp("20130101"), pd.NaT])
result = s.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
# where (we ignore the errors=)
result = s.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
result = s.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = s.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
s2 = s.copy()
s2[1] = "foo"
tm.assert_series_equal(s2, expected)
def test_datetime64tz_fillna_round_issue(self):
# GH 14872
data = pd.Series(
[pd.NaT, pd.NaT, datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc)]
)
filled = data.fillna(method="bfill")
expected = pd.Series(
[
datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc),
]
)
tm.assert_series_equal(filled, expected)
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
s = pd.Series([1.0, np.nan])
result = s.fillna(0, downcast="infer")
expected = pd.Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
s = pd.Series([1.0, np.nan])
result = s.fillna({1: 0}, downcast="infer")
expected = pd.Series([1, 0])
tm.assert_series_equal(result, expected)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
return_value = s.fillna(method="ffill", inplace=True)
assert return_value is None
tm.assert_series_equal(s.fillna(method="ffill", inplace=False), s)
def test_categorical_nan_equality(self):
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = cat == cat
tm.assert_series_equal(res, exp)
def test_categorical_nan_handling(self):
# NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(
s.values.codes, np.array([0, 1, -1, 0], dtype=np.int8)
)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_isna_for_inf(self):
s = Series(["a", np.inf, np.nan, pd.NA, 1.0])
with pd.option_context("mode.use_inf_as_na", True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, True, False])
de = Series(["a", 1.0], index=[0, 4])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_isnull_for_inf_deprecated(self):
# gh-17115
s = Series(["a", np.inf, np.nan, 1.0])
with pd.option_context("mode.use_inf_as_null", True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(["a", 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self, datetime_series):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
filled = x.fillna(method="ffill")
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0], x.index)
tm.assert_series_equal(filled, expected)
filled = x.fillna(method="bfill")
expected = Series([1.0, 1.0, 3.0, 3.0, np.nan], x.index)
tm.assert_series_equal(filled, expected)
def test_fillna_invalid_method(self, datetime_series):
try:
datetime_series.fillna(method="ffil")
except ValueError as inst:
assert "ffil" in str(inst)
def test_ffill(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
ts[2] = np.NaN
tm.assert_series_equal(ts.ffill(), ts.fillna(method="ffill"))
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH14956
series = pd.Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
tm.assert_series_equal(series, result)
def test_bfill(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
ts[2] = np.NaN
tm.assert_series_equal(ts.bfill(), ts.fillna(method="bfill"))
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
assert isna(td1[0])
assert td1[0].value == iNaT
td1[0] = td[0]
assert not isna(td1[0])
# GH#16674 iNaT is treated as an integer when given by the user
td1[1] = iNaT
assert not isna(td1[1])
assert td1.dtype == np.object_
assert td1[1] == iNaT
td1[1] = td[1]
assert not isna(td1[1])
td1[2] = NaT
assert isna(td1[2])
assert td1[2].value == iNaT
td1[2] = td[2]
assert not isna(td1[2])
# FIXME: don't leave commented-out
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# assert isna(result).sum() == 7
# NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= datetime_series <= 0.5
# expected = (datetime_series >= -0.5) & (datetime_series <= 0.5)
# tm.assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([], dtype=object)
assert len(s.dropna()) == 0
return_value = s.dropna(inplace=True)
assert return_value is None
assert len(s) == 0
# invalid axis
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
s.dropna(axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series(
[
Timestamp("2011-01-01 10:00"),
pd.NaT,
Timestamp("2011-01-03 10:00"),
pd.NaT,
]
)
result = s.dropna()
expected = Series(
[Timestamp("2011-01-01 10:00"), Timestamp("2011-01-03 10:00")], index=[0, 2]
)
tm.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(
["2011-01-01 10:00", pd.NaT, "2011-01-03 10:00", pd.NaT], tz="Asia/Tokyo"
)
s = pd.Series(idx)
assert s.dtype == "datetime64[ns, Asia/Tokyo]"
result = s.dropna()
expected = Series(
[
Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
Timestamp("2011-01-03 10:00", tz="Asia/Tokyo"),
],
index=[0, 2],
)
assert result.dtype == "datetime64[ns, Asia/Tokyo]"
tm.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name="x"), Series([False, True, False], name="x")]:
result = s.dropna()
tm.assert_series_equal(result, s)
assert result is not s
s2 = s.copy()
return_value = s2.dropna(inplace=True)
assert return_value is None
tm.assert_series_equal(s2, s)
def test_dropna_intervals(self):
s = Series(
[np.nan, 1, 2, 3],
IntervalIndex.from_arrays([np.nan, 0, 1, 2], [np.nan, 1, 2, 3]),
)
result = s.dropna()
expected = s.iloc[1:]
tm.assert_series_equal(result, expected)
def test_valid(self, datetime_series):
ts = datetime_series.copy()
ts.index = ts.index._with_freq(None)
ts[::2] = np.NaN
result = ts.dropna()
assert len(result) == ts.count()
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notna(ts)])
def test_isna(self):
ser = Series([0, 5.4, 3, np.nan, -0.001])
expected = Series([False, False, False, True, False])
tm.assert_series_equal(ser.isna(), expected)
ser = Series(["hi", "", np.nan])
expected = Series([False, False, True])
tm.assert_series_equal(ser.isna(), expected)
def test_notna(self):
ser = Series([0, 5.4, 3, np.nan, -0.001])
expected = Series([True, True, True, False, True])
tm.assert_series_equal(ser.notna(), expected)
ser = Series(["hi", "", np.nan])
expected = Series([True, True, False])
tm.assert_series_equal(ser.notna(), expected)
def test_pad_nan(self):
x = Series(
[np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"], dtype=float
)
return_value = x.fillna(method="pad", inplace=True)
assert return_value is None
expected = Series(
[np.nan, 1.0, 1.0, 3.0, 3.0], ["z", "a", "b", "c", "d"], dtype=float
)
tm.assert_series_equal(x[1:], expected[1:])
assert np.isnan(x[0]), np.isnan(expected[0])
def test_pad_require_monotonicity(self):
rng = date_range("1/1/2000", "3/1/2000", freq="B")
        # neither monotonic increasing nor decreasing
rng2 = rng[[1, 0, 2]]
msg = "index must be monotonic increasing or decreasing"
with pytest.raises(ValueError, match=msg):
rng2.get_indexer(rng, method="pad")
def test_dropna_preserve_name(self, datetime_series):
datetime_series[:5] = np.nan
result = datetime_series.dropna()
assert result.name == datetime_series.name
name = datetime_series.name
ts = datetime_series.copy()
return_value = ts.dropna(inplace=True)
assert return_value is None
assert ts.name == name
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method="pad", limit=5)
expected = s[:2].reindex(index).fillna(method="pad")
expected[-3:] = np.nan
tm.assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method="bfill", limit=5)
expected = s[-2:].reindex(index).fillna(method="backfill")
expected[:3] = np.nan
tm.assert_series_equal(result, expected)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method="pad", limit=5)
expected = s[:2].reindex(index).fillna(method="pad")
expected[-3:] = np.nan
tm.assert_series_equal(result, expected)
result = s[-2:].reindex(index, method="backfill", limit=5)
expected = s[-2:].reindex(index).fillna(method="backfill")
expected[:3] = np.nan
tm.assert_series_equal(result, expected)
|
bsd-3-clause
|
tgsmith61591/skutil
|
doc/conf.py
|
1
|
9886
|
# -*- coding: utf-8 -*-
#
# skutil documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 9 00:16:56 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath(".." + os.path.sep))
sys.path.insert(0, os.path.abspath('sphinxext'))
print "\n\n" + str(sys.path) + "\n\n"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'numpy_ext.numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'skutil'
copyright = u'2016, Taylor Smith, Charles Drotar'
author = u'Taylor Smith, Charles Drotar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import skutil
version = skutil.__version__
# The full version, including alpha/beta/rc tags.
release = skutil.__version__
print version, release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'skutil v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/h2o-sklearn.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'skutildoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'skutil.tex', u'skutil Documentation',
u'Taylor Smith, Charles Drotar', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'skutil', u'skutil Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'skutil', u'skutil Documentation',
author, 'skutil', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
bsd-3-clause
|
ArianeFire/HaniCam
|
facerec-master/py/apps/videofacerec/helper/common.py
|
3
|
6324
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This module contains some common routines used by other samples.
'''
import numpy as np
import cv2
import os
from contextlib import contextmanager
import itertools as it
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
class Bunch(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def __str__(self):
return str(self.__dict__)
def splitfn(fn):
path, fn = os.path.split(fn)
name, ext = os.path.splitext(fn)
return path, name, ext
def anorm2(a):
return (a*a).sum(-1)
def anorm(a):
return np.sqrt( anorm2(a) )
def homotrans(H, x, y):
xs = H[0, 0]*x + H[0, 1]*y + H[0, 2]
ys = H[1, 0]*x + H[1, 1]*y + H[1, 2]
s = H[2, 0]*x + H[2, 1]*y + H[2, 2]
return xs/s, ys/s
def to_rect(a):
a = np.ravel(a)
if len(a) == 2:
a = (0, 0, a[0], a[1])
return np.array(a, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
src, dst = to_rect(src), to_rect(dst)
cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])
tx, ty = dst[0] - src[0] * (cx, cy)
M = np.float64([[ cx, 0, tx],
[ 0, cy, ty],
[ 0, 0, 1]])
return M
def lookat(eye, target, up = (0, 0, 1)):
fwd = np.asarray(target, np.float64) - eye
fwd /= anorm(fwd)
right = np.cross(fwd, up)
right /= anorm(right)
down = np.cross(fwd, right)
R = np.float64([right, down, fwd])
tvec = -np.dot(R, eye)
return R, tvec
def mtx2rvec(R):
w, u, vt = cv2.SVDecomp(R - np.eye(3))
p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
c = np.dot(vt[0], p)
s = np.dot(vt[1], p)
axis = np.cross(vt[0], vt[1])
return axis * np.arctan2(s, c)
def draw_str(dst, (x, y), s):
cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
class Sketcher:
def __init__(self, windowname, dests, colors_func):
self.prev_pt = None
self.windowname = windowname
self.dests = dests
self.colors_func = colors_func
self.dirty = False
self.show()
cv2.setMouseCallback(self.windowname, self.on_mouse)
def show(self):
cv2.imshow(self.windowname, self.dests[0])
def on_mouse(self, event, x, y, flags, param):
pt = (x, y)
if event == cv2.EVENT_LBUTTONDOWN:
self.prev_pt = pt
if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
for dst, color in zip(self.dests, self.colors_func()):
cv2.line(dst, self.prev_pt, pt, color, 5)
self.dirty = True
self.prev_pt = pt
self.show()
else:
self.prev_pt = None
# palette data from matplotlib/_cm.py
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
cmap_data = { 'jet' : _jet_data }
def make_cmap(name, n=256):
data = cmap_data[name]
xs = np.linspace(0.0, 1.0, n)
channels = []
eps = 1e-6
for ch_name in ['blue', 'green', 'red']:
ch_data = data[ch_name]
xp, yp = [], []
for x, y1, y2 in ch_data:
xp += [x, x+eps]
yp += [y1, y2]
ch = np.interp(xs, xp, yp)
channels.append(ch)
return np.uint8(np.array(channels).T*255)
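# Note (added; not in the original sample): make_cmap returns an (n, 3) uint8
# BGR lookup table, so a uint8 grayscale image can be colorized by indexing:
#
#     lut = make_cmap('jet')
#     vis = lut[gray]  # gray: uint8 HxW image -> vis: HxWx3 BGR image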
def nothing(*arg, **kw):
pass
def clock():
return cv2.getTickCount() / cv2.getTickFrequency()
@contextmanager
def Timer(msg):
print msg, '...',
start = clock()
try:
yield
finally:
print "%.2f ms" % ((clock()-start)*1000)
class StatValue:
def __init__(self, smooth_coef = 0.5):
self.value = None
self.smooth_coef = smooth_coef
def update(self, v):
if self.value is None:
self.value = v
else:
c = self.smooth_coef
self.value = c * self.value + (1.0-c) * v
class RectSelector:
def __init__(self, win, callback):
self.win = win
self.callback = callback
cv2.setMouseCallback(win, self.onmouse)
self.drag_start = None
self.drag_rect = None
def onmouse(self, event, x, y, flags, param):
x, y = np.int16([x, y]) # BUG
if event == cv2.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
if self.drag_start:
if flags & cv2.EVENT_FLAG_LBUTTON:
xo, yo = self.drag_start
x0, y0 = np.minimum([xo, yo], [x, y])
x1, y1 = np.maximum([xo, yo], [x, y])
self.drag_rect = None
if x1-x0 > 0 and y1-y0 > 0:
self.drag_rect = (x0, y0, x1, y1)
else:
rect = self.drag_rect
self.drag_start = None
self.drag_rect = None
if rect:
self.callback(rect)
def draw(self, vis):
if not self.drag_rect:
return False
x0, y0, x1, y1 = self.drag_rect
cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
return True
@property
def dragging(self):
return self.drag_rect is not None
def grouper(n, iterable, fillvalue=None):
'''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
args = [iter(iterable)] * n
return it.izip_longest(fillvalue=fillvalue, *args)
def mosaic(w, imgs):
'''Make a grid from images.
w -- number of grid columns
imgs -- images (must have same size and format)
'''
imgs = iter(imgs)
img0 = imgs.next()
pad = np.zeros_like(img0)
imgs = it.chain([img0], imgs)
rows = grouper(w, imgs, pad)
return np.vstack(map(np.hstack, rows))
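# Illustrative note (added): e.g. mosaic(2, [a, b, c]) lays a and b on the first
# row and c, padded with a zero image, on the second.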
def getsize(img):
h, w = img.shape[:2]
return w, h
def mdot(*args):
return reduce(np.dot, args)
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
for kp in keypoints:
x, y = kp.pt
cv2.circle(vis, (int(x), int(y)), 2, color)
|
mit
|
sauloal/cnidaria
|
scripts/venv/lib/python2.7/site-packages/pandas/__init__.py
|
1
|
1983
|
# pylint: disable-msg=W0614,W0401,W0611,W0622
__docformat__ = 'restructuredtext'
try:
from pandas import hashtable, tslib, lib
except ImportError as e: # pragma: no cover
module = str(e).lstrip('cannot import name ') # hack but overkill to use re
raise ImportError("C extension: {0} not built. If you want to import "
"pandas from the source directory, you may need to run "
"'python setup.py build_ext --inplace' to build the C "
"extensions first.".format(module))
from datetime import datetime
import numpy as np
# XXX: HACK for NumPy 1.5.1 to suppress warnings
try:
np.seterr(all='ignore')
except Exception: # pragma: no cover
pass
# numpy versioning
from distutils.version import LooseVersion
_np_version = np.version.short_version
_np_version_under1p8 = LooseVersion(_np_version) < '1.8'
_np_version_under1p9 = LooseVersion(_np_version) < '1.9'
from pandas.version import version as __version__
from pandas.info import __doc__
if LooseVersion(_np_version) < '1.7.0':
raise ImportError('pandas {0} is incompatible with numpy < 1.7.0, '
'your numpy version is {1}. Please upgrade numpy to'
' >= 1.7.0 to use pandas version {0}'.format(__version__,
_np_version))
# let init-time option registration happen
import pandas.core.config_init
from pandas.core.api import *
from pandas.sparse.api import *
from pandas.stats.api import *
from pandas.tseries.api import *
from pandas.io.api import *
from pandas.computation.api import *
from pandas.tools.merge import merge, concat, ordered_merge
from pandas.tools.pivot import pivot_table, crosstab
from pandas.tools.plotting import scatter_matrix, plot_params
from pandas.tools.tile import cut, qcut
from pandas.core.reshape import melt
from pandas.util.print_versions import show_versions
import pandas.util.testing
|
mit
|
BiaDarkia/scikit-learn
|
sklearn/ensemble/iforest.py
|
2
|
15758
|
# Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy as sp
import warnings
from warnings import warn
from sklearn.utils.fixes import euler_gamma
from scipy.sparse import issparse
import numbers
from ..externals import six
from ..tree import ExtraTreeRegressor
from ..utils import check_random_state, check_array
from ..utils.validation import check_is_fitted
from ..base import OutlierMixin
from .bagging import BaseBagging
__all__ = ["IsolationForest"]
INTEGER_TYPES = (numbers.Integral, np.integer)
class IsolationForest(BaseBagging, OutlierMixin):
"""Isolation Forest Algorithm
Return the anomaly score of each sample using the IsolationForest algorithm
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a
measure of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path
lengths for particular samples, they are highly likely to be anomalies.
Read more in the :ref:`User Guide <isolation_forest>`.
.. versionadded:: 0.18
Parameters
----------
n_estimators : int, optional (default=100)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default="auto")
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
- If "auto", then `max_samples=min(256, n_samples)`.
If max_samples is larger than the number of samples provided,
all samples will be used for all trees (no sampling).
contamination : float in (0., 0.5), optional (default=0.1)
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. Used when fitting to define the threshold
on the decision function. If 'auto', the decision function threshold is
determined as in the original paper.
max_features : int or float, optional (default=1.0)
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : boolean, optional (default=False)
If True, individual trees are fit on random subsets of the training
data sampled with replacement. If False, sampling without replacement
is performed.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
    estimators_ : list of ExtraTreeRegressor
The collection of fitted sub-estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
max_samples_ : integer
The actual number of samples
offset_ : float
Offset used to define the decision function from the raw scores.
We have the relation: decision_function = score_samples - offset_.
When the contamination parameter is set to "auto", the offset is equal
to -0.5 as the scores of inliers are close to 0 and the scores of
outliers are close to -1. When a contamination parameter different
than "auto" is provided, the offset is defined in such a way we obtain
the expected number of outliers (samples with decision function < 0)
in training.
References
----------
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
.. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based
anomaly detection." ACM Transactions on Knowledge Discovery from
Data (TKDD) 6.1 (2012): 3.
"""
def __init__(self,
n_estimators=100,
max_samples="auto",
contamination="legacy",
max_features=1.,
bootstrap=False,
n_jobs=1,
random_state=None,
verbose=0):
super(IsolationForest, self).__init__(
base_estimator=ExtraTreeRegressor(
max_features=1,
splitter='random',
random_state=random_state),
# here above max_features has no links with self.max_features
bootstrap=bootstrap,
bootstrap_features=False,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
if contamination == "legacy":
warnings.warn('default contamination parameter 0.1 will change '
'in version 0.22 to "auto". This will change the '
'predict method behavior.',
DeprecationWarning)
self.contamination = contamination
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by iforest")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
# ensure that max_sample is in [1, n_samples]:
n_samples = X.shape[0]
if isinstance(self.max_samples, six.string_types):
if self.max_samples == 'auto':
max_samples = min(256, n_samples)
else:
                raise ValueError('max_samples (%s) is not supported. '
                                 'Valid choices are: "auto", int or '
                                 'float' % self.max_samples)
elif isinstance(self.max_samples, INTEGER_TYPES):
if self.max_samples > n_samples:
warn("max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples))
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not (0. < self.max_samples <= 1.):
raise ValueError("max_samples must be in (0, 1], got %r"
% self.max_samples)
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super(IsolationForest, self)._fit(X, y, max_samples,
max_depth=max_depth,
sample_weight=sample_weight)
if self.contamination == "auto":
# 0.5 plays a special role as described in the original paper.
# we take the opposite as we consider the opposite of their score.
self.offset_ = -0.5
            # need to save (deprecated) threshold_ in this case:
self._threshold_ = sp.stats.scoreatpercentile(
self.score_samples(X), 100. * 0.1)
elif self.contamination == "legacy": # to be rm in 0.22
self.offset_ = sp.stats.scoreatpercentile(
self.score_samples(X), 100. * 0.1)
else:
self.offset_ = sp.stats.scoreatpercentile(
self.score_samples(X), 100. * self.contamination)
return self
def predict(self, X):
"""Predict if a particular sample is an outlier or not.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : array, shape (n_samples,)
For each observation, tells whether or not (+1 or -1) it should
be considered as an inlier according to the fitted model.
"""
check_is_fitted(self, ["offset_"])
X = check_array(X, accept_sparse='csr')
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self.decision_function(X) < 0] = -1
return is_inlier
def decision_function(self, X):
"""Average anomaly score of X of the base classifiers.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
        an n_left samples isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
scores : array, shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal. Negative scores represent outliers,
positive scores represent inliers.
"""
# We subtract self.offset_ to make 0 be the threshold value for being
# an outlier:
return self.score_samples(X) - self.offset_
def score_samples(self, X):
"""Opposite of the anomaly score defined in the original paper.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
        an n_left samples isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
scores : array, shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal.
"""
# code structure from ForestClassifier/predict_proba
check_is_fitted(self, ["estimators_"])
# Check data
X = check_array(X, accept_sparse='csr')
if self.n_features_ != X.shape[1]:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is {0} and "
"input n_features is {1}."
"".format(self.n_features_, X.shape[1]))
n_samples = X.shape[0]
n_samples_leaf = np.zeros((n_samples, self.n_estimators), order="f")
depths = np.zeros((n_samples, self.n_estimators), order="f")
if self._max_features == X.shape[1]:
subsample_features = False
else:
subsample_features = True
for i, (tree, features) in enumerate(zip(self.estimators_,
self.estimators_features_)):
if subsample_features:
X_subset = X[:, features]
else:
X_subset = X
leaves_index = tree.apply(X_subset)
node_indicator = tree.decision_path(X_subset)
n_samples_leaf[:, i] = tree.tree_.n_node_samples[leaves_index]
depths[:, i] = np.ravel(node_indicator.sum(axis=1))
depths[:, i] -= 1
depths += _average_path_length(n_samples_leaf)
scores = 2 ** (-depths.mean(axis=1) / _average_path_length(
self.max_samples_))
# Take the opposite of the scores as bigger is better (here less
# abnormal)
return -scores
@property
def threshold_(self):
warnings.warn("threshold_ attribute is deprecated in 0.20 and will"
" be removed in 0.22.", DeprecationWarning)
if self.contamination == 'auto':
return self._threshold_
return self.offset_
def _average_path_length(n_samples_leaf):
""" The average path length in a n_samples iTree, which is equal to
the average path length of an unsuccessful BST search since the
latter has the same structure as an isolation tree.
Parameters
----------
n_samples_leaf : array-like, shape (n_samples, n_estimators), or int.
The number of training samples in each test sample leaf, for
        each estimator.
Returns
-------
average_path_length : array, same shape as n_samples_leaf
"""
if isinstance(n_samples_leaf, INTEGER_TYPES):
if n_samples_leaf <= 1:
return 1.
else:
return 2. * (np.log(n_samples_leaf - 1.) + euler_gamma) - 2. * (
n_samples_leaf - 1.) / n_samples_leaf
else:
n_samples_leaf_shape = n_samples_leaf.shape
n_samples_leaf = n_samples_leaf.reshape((1, -1))
average_path_length = np.zeros(n_samples_leaf.shape)
mask = (n_samples_leaf <= 1)
not_mask = np.logical_not(mask)
average_path_length[mask] = 1.
average_path_length[not_mask] = 2. * (
np.log(n_samples_leaf[not_mask] - 1.) + euler_gamma) - 2. * (
n_samples_leaf[not_mask] - 1.) / n_samples_leaf[not_mask]
return average_path_length.reshape(n_samples_leaf_shape)
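# --- Hedged usage sketch (not part of the original module) ---
# A minimal, self-contained example of the estimator defined above: fit on a
# tight cluster of inliers, then score a handful of uniformly scattered
# points. All data and parameter values are illustrative only.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    X_train = 0.3 * rng.randn(100, 2)                        # dense inlier cluster
    X_scattered = rng.uniform(low=-4, high=4, size=(10, 2))  # likely outliers
    clf = IsolationForest(n_estimators=100, max_samples="auto",
                          contamination=0.1, random_state=42)
    clf.fit(X_train)
    # predict() returns +1 for inliers and -1 for outliers, as documented above.
    print(clf.predict(X_train[:5]))
    print(clf.predict(X_scattered[:5]))
    # decision_function() is score_samples() shifted by offset_, so negative
    # values correspond to points flagged as outliers.
    print(clf.decision_function(X_scattered[:5]))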
|
bsd-3-clause
|
jakobworldpeace/scikit-learn
|
sklearn/linear_model/sag.py
|
1
|
12700
|
"""Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import make_dataset
from .sag_fast import sag
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
n_samples=None,
is_saga=False):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
the max sum of squares for over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
n_samples : int, optional
Number of rows in X. Useful if is_saga=True.
is_saga : boolean, optional
Whether to return step size for the SAGA algorithm or the SAG
algorithm.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
"""
if loss in ('log', 'multinomial'):
L = (0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled)
elif loss == 'squared':
        # Lipschitz constant for the squared loss; the step size is its inverse
L = max_squared_sum + int(fit_intercept) + alpha_scaled
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
if is_saga:
# SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
# See Defazio et al. 2014
mun = min(2 * n_samples * alpha_scaled, L)
step = 1. / (2 * L + mun)
else:
# SAG theoretical step size is 1/16L but it is recommended to use 1 / L
# see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
# slide 65
step = 1. / L
return step
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None,
is_saga=False):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter : int, optional
The max number of passes over the training data if the stopping
        criterion is not reached. Defaults to 1000.
tol : double, optional
        The stopping criterion for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose : integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
is_saga : boolean, optional
Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allows for L1 regularisation.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
beta_scaled = float(beta) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept, n_samples=n_samples,
is_saga=is_saga)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
beta_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
is_saga,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
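# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how the automatic step size depends on the data:
# take the largest squared row norm of X and feed it to get_auto_step_size
# for the squared loss. The toy matrix and alpha value are illustrative only.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 5)
    max_squared_sum_demo = row_norms(X_demo, squared=True).max()
    alpha_scaled_demo = 1.0 / X_demo.shape[0]  # alpha=1 scaled by n_samples
    step = get_auto_step_size(max_squared_sum_demo, alpha_scaled_demo,
                              'squared', fit_intercept=True,
                              n_samples=X_demo.shape[0], is_saga=False)
    print("auto step size for SAG (squared loss): %r" % step)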
|
bsd-3-clause
|
untom/scikit-learn
|
sklearn/datasets/mlcomp.py
|
289
|
3855
|
# Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
    name_or_id : the integer id or the string name of the MLComp dataset
        to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw data to learn from, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
    the loader will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
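# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of the lookup behaviour documented above. It assumes
# that MLCOMP_DATASETS_HOME points at a folder of unzipped MLComp archives and
# that a dataset named '20news-18828' is present; both are assumptions, not
# guarantees.
if __name__ == "__main__":
    if 'MLCOMP_DATASETS_HOME' in os.environ:
        bunch = load_mlcomp('20news-18828', set_='train')
        print("%d documents in %d classes"
              % (len(bunch.filenames), len(bunch.target_names)))
    else:
        print("Set MLCOMP_DATASETS_HOME to run this example.")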
|
bsd-3-clause
|
Aasmi/scikit-learn
|
examples/linear_model/plot_logistic_l1_l2_sparsity.py
|
384
|
2601
|
"""
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
|
bsd-3-clause
|
malcolmw/SeismicPython
|
seispy/pandas/io/fixed_width.py
|
3
|
2431
|
import os
import pandas as pd
from . import schema as _schema
def read_fwf(path=None, schema="css3.0", tables=None):
r"""Read fixed-width-format database tables into a DataFrame.
:param str path: Path to database.
:param str schema: Schema identifier.
    :param list tables: A list of tables to populate.
:return: A dict with table names for keys and pandas.DataFrames as
values.
:rtype: dict
"""
schema_name = schema
# Get the schema.
schema = _schema.get_schema(schema)
# Build the list of tables to populate.
tables = [table for table in tables
if table in schema["Relations"].keys()]\
if tables is not None\
else schema["Relations"].keys()
# If no path is provided, populate tables with null values.
if path is None:
data = {table: _schema.get_empty(schema_name, table)
for table in tables}
return(data)
# Read data files and build tables.
data = {table: pd.read_fwf("%s.%s" % (path, table),
names=schema["Relations"][table],
widths=[schema["Attributes"][field]["width"]+1
for field in schema["Relations"][table]],
comment=schema["comment"] if "comment" in schema
else None)
if os.path.isfile("%s.%s" % (path, table))
else _schema.get_empty(schema_name, table)
for table in tables}
# Coerce dtype of every field.
for table in tables:
for field in schema["Relations"][table]:
data[table][field] = data[table][field].astype(schema["Attributes"][field]["dtype"])
return(data)
def write_fwf(data, path, schema, overwrite=False):
for table in data:
if os.path.isfile("%s.%s" % (path, table)) and overwrite is False:
raise(IOError("file already exists: %s.%s" % (path, table)))
schema = _schema.get_schema(schema)
for table in data:
fields = schema["Relations"][table]
fmt = " ".join([schema["Attributes"][field]["format"]
for field in schema["Relations"][table]])
with open("%s.%s" % (path, table), "w") as outf:
outf.write(
"\n".join([fmt % tuple(row) for _, row in data[table][fields].iterrows()]) + "\n"
)
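# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of the functions above: with path=None, read_fwf
# returns empty but schema-typed tables, which is a convenient starting point
# for building a database from scratch. The output path '/tmp/demo_db' in the
# commented write_fwf call is hypothetical.
if __name__ == "__main__":
    data = read_fwf(path=None, schema="css3.0")
    for name in sorted(data):
        print("%s: %d rows" % (name, len(data[name])))
    # write_fwf(data, "/tmp/demo_db", "css3.0", overwrite=True)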
|
gpl-3.0
|
voxlol/scikit-learn
|
examples/svm/plot_iris.py
|
225
|
3252
|
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
|
bsd-3-clause
|
drogenlied/pypid
|
examples/pid_repsonse.py
|
2
|
4198
|
#!/usr/bin/env python
# Copyright (C) 2011-2012 W. Trevor King <[email protected]>
#
# This file is part of pypid.
#
# pypid is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pypid is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pypid. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
from sys import stdout
from time import sleep
try:
from matplotlib import pyplot
from numpy import loadtxt
except (ImportError,RuntimeError), e:
pyplot = None
loadtxt = None
plot_import_error = e
from pypid.backend.test import TestBackend
from pypid.rules import ziegler_nichols_step_response
parser = ArgumentParser(description='Simulate a step response.')
parser.add_argument(
'-K', '--process-gain', metavar='GAIN', type=float, default=1,
help='process gain (PV-units over MV-units)',)
parser.add_argument(
'-L', '--dead-time', metavar='TIME', type=float, default=1,
help='system dead time (lag)')
parser.add_argument(
'-T', '--decay-time', metavar='TIME', type=float, default=1,
help='exponential decay timescale')
parser.add_argument(
'-P', '--proportional', metavar='GAIN', type=float, default=None,
help='process gain (output units over input units)',)
parser.add_argument(
'-I', '--integral', metavar='TIME', type=float, default=None,
help='integral gain timescale')
parser.add_argument(
'-D', '--derivative', metavar='TIME', type=float, default=None,
help='derivative gain timescale')
parser.add_argument(
'-M', '--max-mv', metavar='MV', type=float, default=100.,
help='maximum manipulated variable')
parser.add_argument(
'-A', '--tuning-algorithm', metavar='TUNER', default=None,
choices=['ZN'], help='step tuning algorithm')
parser.add_argument(
'-m', '--mode', metavar='MODE', default='PID',
choices=['P', 'PI', 'PID'], help='controller mode')
parser.add_argument(
'-t', '--time', metavar='TIME', type=float, default=10.,
help='simulation time')
parser.add_argument(
'-o', '--output', default='-', help='output log file')
parser.add_argument(
'-p', '--plot', action='store_true', default=False,
    help='plot the response')
args = parser.parse_args()
if args.plot and not (pyplot and loadtxt) :
raise plot_import_error
if args.output == '-':
log_stream = stdout
if args.plot:
raise ValueError('can only plot when outputing to a file')
else:
log_stream = open(args.output, 'w')
K = args.process_gain
L = args.dead_time
T = args.decay_time
p,i,d = (0, float('inf'), 0)
if args.tuning_algorithm == 'ZN':
p,i,d = ziegler_nichols_step_response(
process_gain=K, dead_time=L, decay_time=T, mode=args.mode)
else:
if args.proportional:
p = args.proportional
if args.integral:
i = args.integral
if args.derivative:
d = args.derivative
b = TestBackend(
process_gain=K, dead_time=L, decay_time=T, max_mv=args.max_mv,
log_stream=log_stream)
try:
b.set_up_gains(proportional=p, integral=i, derivative=d)
b.set_down_gains(proportional=p, integral=i, derivative=d)
b.set_setpoint(1.0)
sleep(args.time)
finally:
b.cleanup();
if args.output != '-':
log_stream.close()
if args.plot:
header = open(args.output, 'r').readline()
label = header.strip('#\n').split('\t')
data = loadtxt(args.output)
times = data[:,0] - data[0,0]
pyplot.hold(True)
subplot = 1
for i in range(1, len(label)):
if i in [1, 4, 6]:
if i:
pyplot.legend(loc='best') # add legend to previous subplot
pyplot.subplot(3, 1, subplot)
subplot += 1
pyplot.plot(times, data[:,i], '.', label=label[i])
pyplot.legend(loc='best')
pyplot.show()
|
gpl-3.0
|
RobertABT/heightmap
|
build/matplotlib/examples/pylab_examples/boxplot_demo2.py
|
6
|
4348
|
"""
Thanks Josh Hemann for the example
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
# Generate some data from five different probability distributions,
# each with different characteristics. We want to play with how an IID
# bootstrap resample of the data preserves the distributional
# properties of the original sample, and a boxplot is one visual tool
# to make this assessment
numDists = 5
randomDists = ['Normal(1,1)', 'Lognormal(1,1)', 'Exp(1)', 'Gumbel(6,4)',
'Triangular(2,9,11)']
N = 500
norm = np.random.normal(1,1, N)
logn = np.random.lognormal(1,1, N)
expo = np.random.exponential(1, N)
gumb = np.random.gumbel(6, 4, N)
tria = np.random.triangular(2, 9, 11, N)
# Generate some random indices that we'll use to resample the original data
# arrays. For code brevity, just use the same random indices for each array
bootstrapIndices = np.random.random_integers(0, N-1, N)
normBoot = norm[bootstrapIndices]
expoBoot = expo[bootstrapIndices]
gumbBoot = gumb[bootstrapIndices]
lognBoot = logn[bootstrapIndices]
triaBoot = tria[bootstrapIndices]
data = [norm, normBoot, logn, lognBoot, expo, expoBoot, gumb, gumbBoot,
tria, triaBoot]
fig, ax1 = plt.subplots(figsize=(10,6))
fig.canvas.set_window_title('A Boxplot Example')
plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
bp = plt.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# Hide these grid behind plot objects
ax1.set_axisbelow(True)
ax1.set_title('Comparison of IID Bootstrap Resampling Across Five Distributions')
ax1.set_xlabel('Distribution')
ax1.set_ylabel('Value')
# Now fill the boxes with desired colors
boxColors = ['darkkhaki','royalblue']
numBoxes = numDists*2
medians = range(numBoxes)
for i in range(numBoxes):
box = bp['boxes'][i]
boxX = []
boxY = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
boxCoords = zip(boxX,boxY)
# Alternate between Dark Khaki and Royal Blue
k = i % 2
boxPolygon = Polygon(boxCoords, facecolor=boxColors[k])
ax1.add_patch(boxPolygon)
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
medianX = []
medianY = []
for j in range(2):
medianX.append(med.get_xdata()[j])
medianY.append(med.get_ydata()[j])
plt.plot(medianX, medianY, 'k')
medians[i] = medianY[0]
# Finally, overplot the sample averages, with horizontal alignment
# in the center of each box
plt.plot([np.average(med.get_xdata())], [np.average(data[i])],
color='w', marker='*', markeredgecolor='k')
# Set the axes ranges and axes labels
ax1.set_xlim(0.5, numBoxes+0.5)
top = 40
bottom = -5
ax1.set_ylim(bottom, top)
xtickNames = plt.setp(ax1, xticklabels=np.repeat(randomDists, 2))
plt.setp(xtickNames, rotation=45, fontsize=8)
# Due to the Y-axis scale being different across samples, it can be
# hard to compare differences in medians across the samples. Add upper
# X-axis tick labels with the sample medians to aid in comparison
# (just use two decimal places of precision)
pos = np.arange(numBoxes)+1
upperLabels = [str(np.round(s, 2)) for s in medians]
weights = ['bold', 'semibold']
for tick,label in zip(range(numBoxes),ax1.get_xticklabels()):
k = tick % 2
ax1.text(pos[tick], top-(top*0.05), upperLabels[tick],
horizontalalignment='center', size='x-small', weight=weights[k],
color=boxColors[k])
# Finally, add a basic legend
plt.figtext(0.80, 0.08, str(N) + ' Random Numbers' ,
backgroundcolor=boxColors[0], color='black', weight='roman',
size='x-small')
plt.figtext(0.80, 0.045, 'IID Bootstrap Resample',
backgroundcolor=boxColors[1],
color='white', weight='roman', size='x-small')
plt.figtext(0.80, 0.015, '*', color='white', backgroundcolor='silver',
weight='roman', size='medium')
plt.figtext(0.815, 0.013, ' Average Value', color='black', weight='roman',
size='x-small')
plt.show()
|
mit
|
CDSFinance/zipline
|
zipline/examples/pairtrade.py
|
11
|
5699
|
#!/usr/bin/env python
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logbook
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.transforms import batch_transform
from zipline.utils.factory import load_from_yahoo
from zipline.api import symbol
@batch_transform
def ols_transform(data, sid1, sid2):
"""Computes regression coefficient (slope and intercept)
via Ordinary Least Squares between two SIDs.
"""
p0 = data.price[sid1]
p1 = sm.add_constant(data.price[sid2], prepend=True)
slope, intercept = sm.OLS(p0, p1).fit().params
return slope, intercept
class Pairtrade(TradingAlgorithm):
"""Pairtrading relies on cointegration of two stocks.
The expectation is that once the two stocks drifted apart
(i.e. there is spread), they will eventually revert again. Thus,
if we short the upward drifting stock and long the downward
drifting stock (in short, we buy the spread) once the spread
widened we can sell the spread with profit once they converged
again. A nice property of this algorithm is that we enter the
market in a neutral position.
This specific algorithm tries to exploit the cointegration of
Pepsi and Coca Cola by estimating the correlation between the
two. Divergence of the spread is evaluated by z-scoring.
"""
def initialize(self, window_length=100):
self.spreads = []
self.invested = 0
self.window_length = window_length
self.ols_transform = ols_transform(refresh_period=self.window_length,
window_length=self.window_length)
self.PEP = self.symbol('PEP')
self.KO = self.symbol('KO')
def handle_data(self, data):
######################################################
# 1. Compute regression coefficients between PEP and KO
params = self.ols_transform.handle_data(data, self.PEP, self.KO)
if params is None:
return
intercept, slope = params
######################################################
# 2. Compute spread and zscore
zscore = self.compute_zscore(data, slope, intercept)
self.record(zscores=zscore,
PEP=data[symbol('PEP')].price,
KO=data[symbol('KO')].price)
######################################################
# 3. Place orders
self.place_orders(data, zscore)
def compute_zscore(self, data, slope, intercept):
"""1. Compute the spread given slope and intercept.
2. zscore the spread.
"""
spread = (data[self.PEP].price -
(slope * data[self.KO].price + intercept))
self.spreads.append(spread)
spread_wind = self.spreads[-self.window_length:]
zscore = (spread - np.mean(spread_wind)) / np.std(spread_wind)
return zscore
def place_orders(self, data, zscore):
"""Buy spread if zscore is > 2, sell if zscore < .5.
"""
if zscore >= 2.0 and not self.invested:
self.order(self.PEP, int(100 / data[self.PEP].price))
self.order(self.KO, -int(100 / data[self.KO].price))
self.invested = True
elif zscore <= -2.0 and not self.invested:
self.order(self.PEP, -int(100 / data[self.PEP].price))
self.order(self.KO, int(100 / data[self.KO].price))
self.invested = True
elif abs(zscore) < .5 and self.invested:
self.sell_spread()
self.invested = False
def sell_spread(self):
"""
        Decrease exposure, regardless of position (long or short):
        buy to cover a short position, sell to close a long one.
"""
ko_amount = self.portfolio.positions[self.KO].amount
self.order(self.KO, -1 * ko_amount)
pep_amount = self.portfolio.positions[self.PEP].amount
self.order(self.PEP, -1 * pep_amount)
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
ax1 = plt.subplot(211)
plt.title('PepsiCo & Coca-Cola Co. share prices')
results[['PEP', 'KO']].plot(ax=ax1)
plt.ylabel('Price (USD)')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot(212, sharex=ax1)
results.zscores.plot(ax=ax2, color='r')
plt.ylabel('Z-scored spread')
plt.gcf().set_size_inches(18, 8)
plt.show()
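# --- Hedged illustration (not part of the original example) ---
# A standalone sketch of the spread z-scoring described in the Pairtrade
# docstring, using synthetic cointegrated prices instead of PEP/KO data and
# numpy.polyfit as a stand-in for the OLS batch transform. Call it manually
# if you want to inspect the numbers.
def _zscore_spread_demo(n=200):
    rng = np.random.RandomState(0)
    p_ko = np.cumsum(rng.randn(n)) + 50.0          # synthetic "KO" price path
    p_pep = 1.2 * p_ko + 5.0 + rng.randn(n)        # cointegrated "PEP" path
    slope, intercept = np.polyfit(p_ko, p_pep, 1)  # regression coefficients
    spread = p_pep - (slope * p_ko + intercept)
    zscores = (spread - spread.mean()) / spread.std()
    print("last z-score of the spread: %.2f" % zscores[-1])
    return zscores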
# Note: this if-block should be removed if running
# this algorithm on quantopian.com
if __name__ == '__main__':
logbook.StderrHandler().push_application()
# Set the simulation start and end dates.
start = datetime(2000, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
# Load price data from yahoo.
data = load_from_yahoo(stocks=['PEP', 'KO'], indexes={},
start=start, end=end)
# Create and run the algorithm.
pairtrade = Pairtrade()
results = pairtrade.run(data)
# Plot the portfolio data.
analyze(results=results)
|
apache-2.0
|
vene/ambra
|
scripts/semeval_reproduce_task2.py
|
1
|
5151
|
from __future__ import print_function
import sys
import json
import numpy as np
from scipy.stats import sem
from sklearn.base import clone
from sklearn.utils import shuffle
from sklearn.cross_validation import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import FeatureUnion
from sklearn.feature_selection import chi2
from sklearn.feature_extraction.text import TfidfVectorizer
from ambra.cross_validation import cross_val_score
from ambra.tools import PossiblePipeline, Proj, IntervalSelectKBest
from ambra.features import LengthFeatures, StylisticFeatures
from ambra.features import NgramLolAnalyzer
from ambra.interval_scoring import semeval_interval_scorer
from ambra.classifiers import IntervalLogisticRegression
fname = sys.argv[1]
with open(fname) as f:
entries = json.load(f)
# some buggy docs are empty
entries = [entry for entry in entries if len(entry['lemmas'])]
X = np.array(entries)
Y = np.array([doc['interval'] for doc in entries])
Y_possible = np.array([doc['all_fine_intervals'] for doc in entries])
X, Y, Y_possible = shuffle(X, Y, Y_possible, random_state=0)
print("Length features")
print("===============")
pipe = PossiblePipeline([('vect', Proj(LengthFeatures(), key='lemmas')),
('scale', StandardScaler(with_mean=False,
with_std=True)),
('clf', IntervalLogisticRegression(C=0.0008030857221,
n_neighbors=10,
limit_pairs=1,
random_state=0))])
scores = cross_val_score(pipe, X, Y, cv=KFold(len(X), n_folds=5),
scoring=semeval_interval_scorer,
scorer_params=dict(Y_possible=Y_possible),
n_jobs=4)
print("{:.3f} +/- {:.4f}".format(scores.mean(), sem(scores)))
print()
print("Stylistic features")
print("==================")
union = FeatureUnion([('lengths', Proj(LengthFeatures(), key='lemmas')),
('style', StylisticFeatures())])
pipe = PossiblePipeline([('vect', union),
('scale', StandardScaler(with_mean=False,
with_std=True)),
('clf', IntervalLogisticRegression(C=0.02154434690032,
n_neighbors=10,
limit_pairs=1,
random_state=0))])
scores = cross_val_score(pipe, X, Y, cv=KFold(len(X), n_folds=5),
scoring=semeval_interval_scorer,
scorer_params=dict(Y_possible=Y_possible),
n_jobs=4)
print("{:.3f} +/- {:.4f}".format(scores.mean(), sem(scores)))
print()
print("Full")
print("====")
vectorizer = TfidfVectorizer(use_idf=False, norm='l1',
analyzer=NgramLolAnalyzer(lower=False))
vectorizer_low = TfidfVectorizer(use_idf=False, norm='l1',
analyzer=NgramLolAnalyzer(lower=True))
union = FeatureUnion([('lengths', Proj(LengthFeatures(), key='lemmas')),
('style', StylisticFeatures()),
('pos', Proj(clone(vectorizer), key='pos')),
('tokens', Proj(clone(vectorizer_low), key='tokens'))])
final_pipe = PossiblePipeline([('union', union),
('scale', StandardScaler(with_mean=False,
with_std=True)),
('fs', IntervalSelectKBest(chi2)),
('clf', IntervalLogisticRegression(
n_neighbors=10,
limit_pairs=0.01, # make larger if possible
random_state=0))])
final_pipe.set_params(**{'union__tokens__transf__min_df': 5,
'union__tokens__transf__max_df': 0.9,
'union__pos__transf__analyzer__ngram_range': (2, 2),
'union__pos__transf__max_df': 0.8,
'fs__k': 2000,
'union__pos__transf__min_df': 1,
'clf__C': 2.592943797404667e-05,
'union__tokens__transf__analyzer__ngram_range': (1, 1)}
)
scores = cross_val_score(final_pipe, X, Y, cv=KFold(len(X), n_folds=5),
scoring=semeval_interval_scorer,
scorer_params=dict(Y_possible=Y_possible),
n_jobs=4
)
print("{:.3f} +/- {:.4f}".format(scores.mean(), sem(scores)))
final_pipe.fit(X, Y)
feature_names = final_pipe.steps[0][1].get_feature_names()
feature_names = np.array(feature_names)[final_pipe.steps[2][1].get_support()]
coef = final_pipe.steps[-1][-1].coef_.ravel()
for idx in np.argsort(-np.abs(coef))[:100]:
print("{:.2f}\t{}".format(coef[idx], feature_names[idx]))
|
bsd-2-clause
|
ambikeshwar1991/sandhi-2
|
module/gr36/gnuradio-core/src/examples/pfb/fmtest.py
|
17
|
7785
|
#!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, math, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class fmtx(gr.hier_block2):
def __init__(self, lo_freq, audio_rate, if_rate):
gr.hier_block2.__init__(self, "build_fm",
gr.io_signature(1, 1, gr.sizeof_float), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
fmtx = blks2.nbfm_tx (audio_rate, if_rate, max_dev=5e3, tau=75e-6)
# Local oscillator
lo = gr.sig_source_c (if_rate, # sample rate
gr.GR_SIN_WAVE, # waveform type
lo_freq, #frequency
1.0, # amplitude
0) # DC Offset
mixer = gr.multiply_cc ()
self.connect (self, fmtx, (mixer, 0))
self.connect (lo, (mixer, 1))
self.connect (mixer, self)
class fmtest(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._nsamples = 1000000
self._audio_rate = 8000
# Set up N channels with their own baseband and IF frequencies
self._N = 5
chspacing = 16000
freq = [10, 20, 30, 40, 50]
f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]
self._if_rate = 4*self._N*self._audio_rate
# Create a signal source and frequency modulate it
self.sum = gr.add_cc ()
for n in xrange(self._N):
sig = gr.sig_source_f(self._audio_rate, gr.GR_SIN_WAVE, freq[n], 0.5)
fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
self.connect(sig, fm)
self.connect(fm, (self.sum, n))
self.head = gr.head(gr.sizeof_gr_complex, self._nsamples)
self.snk_tx = gr.vector_sink_c()
self.channel = blks2.channel_model(0.1)
self.connect(self.sum, self.head, self.channel, self.snk_tx)
        # Design the channelizer
self._M = 10
bw = chspacing/2.0
t_bw = chspacing/10.0
self._chan_rate = self._if_rate / self._M
self._taps = gr.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
attenuation_dB=100,
window=gr.firdes.WIN_BLACKMAN_hARRIS)
tpc = math.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
self.pfb = blks2.pfb_channelizer_ccf(self._M, self._taps)
self.connect(self.channel, self.pfb)
        # Create a sink for each of the M output channels of the filter and connect it
self.fmdet = list()
self.squelch = list()
self.snks = list()
for i in xrange(self._M):
self.fmdet.append(blks2.nbfm_rx(self._audio_rate, self._chan_rate))
self.squelch.append(blks2.standard_squelch(self._audio_rate*10))
self.snks.append(gr.vector_sink_f())
self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])
def num_tx_channels(self):
return self._N
def num_rx_channels(self):
return self._M
def main():
fm = fmtest()
tstart = time.time()
fm.run()
tend = time.time()
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 100000
fftlen = 8192
winfunc = scipy.blackman
# Plot transmitted signal
fs = fm._if_rate
d = fm.snk_tx.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
visible=False)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-120.0, 20.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-5, 5])
# Set up the number of rows and columns for plotting the subfigures
Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
if(fm.num_rx_channels() % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = fm._audio_rate
for i in xrange(len(fm.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = fm.snks[i].data()[Ns:Ne]
sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
visible=False)
#X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
X_o = 10.0*scipy.log10(abs(X))
#f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
sp2_f.set_ylim([-120.0, 20.0])
sp2_f.grid(True)
sp2_f.set_title(("Channel %d" % i), weight="bold")
sp2_f.set_xlabel("Frequency (kHz)")
sp2_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs_o
Tmax = len(d)*Ts
t_o = scipy.arange(0, Tmax, Ts)
x_t = scipy.array(d)
sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
p2_t = sp2_t.plot(t_o, x_t.real, "b")
p2_t = sp2_t.plot(t_o, x_t.imag, "r")
sp2_t.set_xlim([min(t_o), max(t_o)+1])
sp2_t.set_ylim([-1, 1])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
main()
|
gpl-3.0
|
eg-zhang/scikit-learn
|
examples/neural_networks/plot_rbm_logistic_classification.py
|
258
|
4609
|
"""
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
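# Editor's note (illustrative, not part of the original example): each 3x3 matrix in
# direction_vectors is used as a convolution kernel with constant (zero) padding, so it
# shifts an 8x8 image by exactly one pixel in one of the four directions; together with
# the original images this yields the 5x larger training set described in the docstring.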
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
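# Editor's note (not part of the original example): within this pipeline,
# classifier.predict(X) first maps the pixel features through rbm.transform
# (the hidden-unit activation probabilities, shape (n_samples, n_components))
# and then applies the logistic regression to those latent features.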
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
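# Editor's sketch (hypothetical, not from the original example): the kind of grid
# search that could have produced the values below. Parameter names follow the
# Pipeline "step__param" convention; the actual grid used by the authors is unknown.
#
# from sklearn.grid_search import GridSearchCV
# param_grid = {'rbm__learning_rate': [0.01, 0.06, 0.1],
#               'rbm__n_components': [50, 100, 200],
#               'logistic__C': [1000.0, 6000.0, 10000.0]}
# search = GridSearchCV(classifier, param_grid, n_jobs=-1)
# search.fit(X_train, Y_train)
# print(search.best_params_)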
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
droidicus/dRonin
|
python/visualize/vid_overlay_position.py
|
5
|
3908
|
#!/usr/bin/python
def video_overlay(pa,ned,pos_des,output_filename):
    # Generate a video of the performance during loiter; this
    # includes the loiter position, the current position estimate,
    # and the raw GPS position.
#output_filename = 'out.mp4'
import matplotlib.animation as animation
import scipy.signal as signal
import matplotlib.pyplot as plt
from numpy import nan, arange
from numpy.lib.function_base import diff
from matplotlib.pyplot import plot, xlabel, ylabel, xlim, ylim, draw
from matplotlib.mlab import find
####### plot section
fig, ax = plt.subplots(1,sharex=True)
fig.set_size_inches([8,4])
fig.patch.set_facecolor('green')
t0_s = pa['time'][0]
t1_s = pa['time'][-1]
# nan out gaps between flights
idx = find(diff(pos_des['time']) > 5000)
pos_des['End'][idx,2] = nan
# Plot position
plot(pa['East'][:,0], pa['North'][:,0], linewidth=3, color=(0.2,0.2,0.2), label="Position")
plot(pa['East'][:,0], pa['North'][:,0], linewidth=2, color=(0.8,0.8,0.8), label="Position")
desired_marker = plot(pos_des['End'][0,1], pos_des['End'][0,0], '*k', markersize=15, label="Desired")
ned_marker = plot(pa['East'][0,0], pa['North'][0,0], '.b', markersize=8)
current_marker = plot(pa['East'][0,0], pa['North'][0,0], '.r', markersize=13)
xlabel('East (m)')
ylabel('North (m)')
xlim(min(pa['East'][:,0]-20), max(pa['East'][:,0])+20)
ylim(min(pa['North'][:,0]-10), max(pa['North'][:,0])+10)
ax.set_axis_bgcolor('green')
ax.xaxis.label.set_color('white')
ax.yaxis.label.set_color('white')
ax.spines['bottom'].set_color('white')
ax.spines['top'].set_color('green')
ax.spines['right'].set_color('green')
ax.spines['left'].set_color('white')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
fig.set_facecolor('green')
fig.subplots_adjust(left=0.15)
fig.subplots_adjust(bottom=0.15)
draw()
def init(fig=fig):
fig.set_facecolor('green')
# Plot a segment of the path
def update_img(t, pa=pa, pos_des=pos_des, ned=ned, desired_marker=desired_marker, current_marker=current_marker, ned_marker=ned_marker, t0_s=t0_s):
import numpy as np
import matplotlib.pyplot
# convert to minutes
t = t / 60
idx = np.argmin(abs(np.double(pa['time']) / 60 - t))
x = pa['East'][idx,0]
y = pa['North'][idx,0]
current_marker[0].set_xdata(x)
current_marker[0].set_ydata(y)
idx = np.argmin(abs(np.double(ned['time']) / 60 - t))
ned_marker[0].set_xdata(ned['East'][idx])
ned_marker[0].set_ydata(ned['North'][idx])
idx = np.argmin(abs(np.double(pos_des['time']) / 60 - t))
delta = abs(np.double(pos_des['time'][idx]) / 60 - t)
if delta < (1/60.0):
desired_marker[0].set_xdata(pos_des['End'][idx,1])
desired_marker[0].set_ydata(pos_des['End'][idx,0])
fig.patch.set_facecolor('green')
fps = 30.0
dpi = 150
t = arange(t0_s, t1_s, 1/fps)
ani = animation.FuncAnimation(fig,update_img,t,init_func=init,interval=0,blit=False)
writer = animation.writers['ffmpeg'](fps=30)
ani.save(output_filename,dpi=dpi,fps=30,writer=writer,savefig_kwargs={'facecolor':'green'})
def main():
import sys, os
sys.path.insert(1, os.path.dirname(sys.path[0]))
from dronin import telemetry
uavo_list = telemetry.get_telemetry_by_args()
from dronin.uavo import UAVO_PositionActual, UAVO_NEDPosition, UAVO_PathDesired
pa = uavo_list.as_numpy_array(UAVO_PositionActual)
ned = uavo_list.as_numpy_array(UAVO_NEDPosition)
pos_des = uavo_list.as_numpy_array(UAVO_PathDesired)
out = "position_overlay.mp4"
video_overlay(pa,ned,pos_des,out)
if __name__ == "__main__":
main()
|
gpl-3.0
|
RPGOne/scikit-learn
|
examples/ensemble/plot_gradient_boosting_quantile.py
|
392
|
2114
|
"""
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
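# Editor's sketch (background, not part of the original example): the quantile
# ("pinball") loss used when loss='quantile'. For a target quantile alpha and
# residual r = y - y_pred, the loss is alpha*r when r >= 0 and (alpha - 1)*r
# otherwise, so fitting with alpha=0.95 and alpha=0.05 gives the upper and lower
# bounds of a 90% prediction interval.
def pinball_loss(y_true, y_pred, alpha):
    r = y_true - y_pred
    return np.mean(np.where(r >= 0, alpha * r, (alpha - 1) * r))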
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# Generate the training inputs
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function and the predictions
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the observations, the predictions and the 90% prediction
# interval
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
|
bsd-3-clause
|
jasonmccampbell/scipy-refactor
|
scipy/interpolate/tests/test_rbf.py
|
3
|
3557
|
#!/usr/bin/env python
# Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
import numpy as np
from numpy.testing import assert_, assert_array_almost_equal, assert_almost_equal
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
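# Editor's note (background, not part of the original tests): an RBF interpolant has
# the form s(x) = sum_i w_i * phi(||x - x_i||), where phi is one of the basis
# functions named above and the weights w_i are chosen so that s(x_j) == y_j at
# every node; that interpolation property is exactly what the checks below verify.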
def check_rbf1d_interpolation(function):
"""Check that the Rbf function interpolates throught the nodes (1D)"""
olderr = np.seterr(all="ignore")
try:
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
finally:
np.seterr(**olderr)
def check_rbf2d_interpolation(function):
"""Check that the Rbf function interpolates throught the nodes (2D)"""
olderr = np.seterr(all="ignore")
try:
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
finally:
np.seterr(**olderr)
def check_rbf3d_interpolation(function):
"""Check that the Rbf function interpolates throught the nodes (3D)"""
olderr = np.seterr(all="ignore")
try:
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = random.rand(50,1)*4-2
d = x*exp(-x**2-y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
finally:
np.seterr(**olderr)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
"""Check that the Rbf function approximates a smooth function well away
from the nodes."""
olderr = np.seterr(all="ignore")
try:
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
#import matplotlib.pyplot as plt
#plt.figure()
#plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
#plt.title(function)
#plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
finally:
np.seterr(**olderr)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.05,
'inverse multiquadric': 0.02,
'gaussian': 0.01,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def test_default_construction():
"""Check that the Rbf class can be constructed with the default
multiquadric basis function. Regression test for ticket #1228."""
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
"""Check that the Rbf class can be constructed with function=callable."""
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
|
bsd-3-clause
|
hennersz/pySpace
|
basemap/examples/fcstmaps_axesgrid.py
|
3
|
3120
|
from __future__ import print_function
from __future__ import unicode_literals
# this example reads today's numerical weather forecasts
# from the NOAA OpenDAP servers and makes a multi-panel plot.
# This version demonstrates the use of the AxesGrid toolkit.
import numpy as np
import matplotlib.pyplot as plt
import sys
import numpy.ma as ma
import datetime
from mpl_toolkits.basemap import Basemap, addcyclic
from mpl_toolkits.axes_grid1 import AxesGrid
from netCDF4 import Dataset as NetCDFFile, num2date
# today's date is default.
if len(sys.argv) > 1:
YYYYMMDD = sys.argv[1]
else:
YYYYMMDD = datetime.datetime.today().strftime('%Y%m%d')
# set OpenDAP server URL.
try:
URLbase="http://nomads.ncep.noaa.gov:9090/dods/gfs/gfs"
URL=URLbase+YYYYMMDD+'/gfs_00z'
print(URL)
data = NetCDFFile(URL)
except:
msg = """
opendap server not providing the requested data.
Try another date by providing YYYYMMDD on command line."""
raise IOError(msg)
# read lats,lons,times.
print(data.variables.keys())
latitudes = data.variables['lat']
longitudes = data.variables['lon']
fcsttimes = data.variables['time']
times = fcsttimes[0:6] # first 6 forecast times.
ntimes = len(times)
# convert times to datetime instances.
fdates = num2date(times,units=fcsttimes.units,calendar='standard')
# make a list of YYYYMMDDHH strings.
verifdates = [fdate.strftime('%Y%m%d%H') for fdate in fdates]
# convert times to forecast hours.
fcsthrs = []
for fdate in fdates:
fdiff = fdate-fdates[0]
fcsthrs.append(fdiff.days*24. + fdiff.seconds/3600.)
print(fcsthrs)
print(verifdates)
lats = latitudes[:]
nlats = len(lats)
lons1 = longitudes[:]
nlons = len(lons1)
# unpack 2-meter temp forecast data.
t2mvar = data.variables['tmp2m']
# create figure, set up AxesGrid.
fig=plt.figure(figsize=(6,8))
grid = AxesGrid(fig, [0.05,0.01,0.9,0.9],
nrows_ncols=(3, 2),
axes_pad=0.25,
cbar_mode='single',
cbar_pad=0.3,
cbar_size=0.1,
cbar_location='top',
share_all=True,
)
# create Basemap instance for Orthographic projection.
m = Basemap(lon_0=-90,lat_0=60,projection='ortho')
# add wrap-around point in longitude.
t2m = np.zeros((ntimes,nlats,nlons+1),np.float32)
for nt in range(ntimes):
t2m[nt,:,:], lons = addcyclic(t2mvar[nt,:,:], lons1)
# convert to celsius.
t2m = t2m-273.15
# contour levels
clevs = np.arange(-30,30.1,2.)
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
# make subplots.
for nt,fcsthr in enumerate(fcsthrs):
ax = grid[nt]
m.ax = ax
cs = m.contourf(x,y,t2m[nt,:,:],clevs,cmap=plt.cm.jet,extend='both')
m.drawcoastlines(linewidth=0.5)
m.drawcountries()
m.drawparallels(np.arange(-80,81,20))
m.drawmeridians(np.arange(0,360,20))
# panel title
ax.set_title('%d-h forecast valid '%fcsthr+verifdates[nt],fontsize=9)
# figure title
plt.figtext(0.5,0.95,
"2-m temp (\N{DEGREE SIGN}C) forecasts from %s"%verifdates[0],
horizontalalignment='center',fontsize=14)
# a single colorbar.
cbar = fig.colorbar(cs, cax=grid.cbar_axes[0], orientation='horizontal')
plt.show()
|
gpl-3.0
|
IndraVikas/scikit-learn
|
examples/covariance/plot_lw_vs_oas.py
|
248
|
2903
|
"""
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
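# Editor's sketch (background, not part of the original example): both estimators
# shrink the empirical covariance S towards a scaled identity,
#     Sigma_shrunk = (1 - rho) * S + rho * mu * I,   with mu = trace(S) / n_features,
# and differ only in how the shrinkage coefficient rho is estimated.
def shrunk_covariance_sketch(S, rho):
    mu = np.trace(S) / S.shape[0]
    return (1.0 - rho) * S + rho * mu * np.eye(S.shape[0])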
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
|
bsd-3-clause
|
chugunovyar/factoryForBuild
|
env/lib/python2.7/site-packages/matplotlib/tests/test_dates.py
|
3
|
17505
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map
import datetime
import warnings
import tempfile
import dateutil
import pytz
try:
# mock in python 3.3+
from unittest import mock
except ImportError:
import mock
from nose.tools import assert_raises, assert_equal
from nose.plugins.skip import SkipTest
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
@image_comparison(baseline_images=['date_empty'], extensions=['png'])
def test_date_empty():
# make sure mpl does the right thing when told to plot dates even
# if no date data has been presented, cf
# http://sourceforge.net/tracker/?func=detail&aid=2850075&group_id=80706&atid=560720
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.xaxis_date()
@image_comparison(baseline_images=['date_axhspan'], extensions=['png'])
def test_date_axhspan():
# test ax hspan with date inputs
t0 = datetime.datetime(2009, 1, 20)
tf = datetime.datetime(2009, 1, 21)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axhspan(t0, tf, facecolor="blue", alpha=0.25)
ax.set_ylim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
fig.subplots_adjust(left=0.25)
@image_comparison(baseline_images=['date_axvspan'], extensions=['png'])
def test_date_axvspan():
    # test ax vspan with date inputs
t0 = datetime.datetime(2000, 1, 20)
tf = datetime.datetime(2010, 1, 21)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axvspan(t0, tf, facecolor="blue", alpha=0.25)
ax.set_xlim(t0 - datetime.timedelta(days=720),
tf + datetime.timedelta(days=720))
fig.autofmt_xdate()
@image_comparison(baseline_images=['date_axhline'],
extensions=['png'])
def test_date_axhline():
# test ax hline with date inputs
t0 = datetime.datetime(2009, 1, 20)
tf = datetime.datetime(2009, 1, 31)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axhline(t0, color="blue", lw=3)
ax.set_ylim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
fig.subplots_adjust(left=0.25)
@image_comparison(baseline_images=['date_axvline'],
extensions=['png'])
def test_date_axvline():
    # test ax vline with date inputs
t0 = datetime.datetime(2000, 1, 20)
tf = datetime.datetime(2000, 1, 21)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axvline(t0, color="red", lw=3)
ax.set_xlim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
fig.autofmt_xdate()
@cleanup
def test_too_many_date_ticks():
# Attempt to test SF 2715172, see
# https://sourceforge.net/tracker/?func=detail&aid=2715172&group_id=80706&atid=560720
    # setting equal datetimes triggers an expander call in
# transforms.nonsingular which results in too many ticks in the
# DayLocator. This should trigger a Locator.MAXTICKS RuntimeError
warnings.filterwarnings(
'ignore',
'Attempting to set identical left==right results\\nin singular '
'transformations; automatically expanding.\\nleft=\d*\.\d*, '
'right=\d*\.\d*',
UserWarning, module='matplotlib.axes')
t0 = datetime.datetime(2000, 1, 20)
tf = datetime.datetime(2000, 1, 20)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim((t0, tf), auto=True)
ax.plot([], [])
ax.xaxis.set_major_locator(mdates.DayLocator())
assert_raises(RuntimeError, fig.savefig, 'junk.png')
@image_comparison(baseline_images=['RRuleLocator_bounds'], extensions=['png'])
def test_RRuleLocator():
import matplotlib.testing.jpl_units as units
units.register()
# This will cause the RRuleLocator to go out of bounds when it tries
# to add padding to the limits, so we make sure it caps at the correct
# boundary values.
t0 = datetime.datetime(1000, 1, 1)
tf = datetime.datetime(6000, 1, 1)
fig = plt.figure()
ax = plt.subplot(111)
ax.set_autoscale_on(True)
ax.plot([t0, tf], [0.0, 1.0], marker='o')
rrule = mdates.rrulewrapper(dateutil.rrule.YEARLY, interval=500)
locator = mdates.RRuleLocator(rrule)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
ax.autoscale_view()
fig.autofmt_xdate()
@image_comparison(baseline_images=['DateFormatter_fractionalSeconds'],
extensions=['png'])
def test_DateFormatter():
import matplotlib.testing.jpl_units as units
units.register()
    # Let's make sure that DateFormatter will allow us to have tick marks
# at intervals of fractional seconds.
t0 = datetime.datetime(2001, 1, 1, 0, 0, 0)
tf = datetime.datetime(2001, 1, 1, 0, 0, 1)
fig = plt.figure()
ax = plt.subplot(111)
ax.set_autoscale_on(True)
ax.plot([t0, tf], [0.0, 1.0], marker='o')
# rrule = mpldates.rrulewrapper( dateutil.rrule.YEARLY, interval=500 )
# locator = mpldates.RRuleLocator( rrule )
# ax.xaxis.set_major_locator( locator )
# ax.xaxis.set_major_formatter( mpldates.AutoDateFormatter(locator) )
ax.autoscale_view()
fig.autofmt_xdate()
def test_date_formatter_strftime():
"""
    Tests that DateFormatter matches datetime.strftime; also checks
    microseconds and a few related issues for years before 1900
    (bug #3179).
"""
def test_strftime_fields(dt):
"""For datetime object dt, check DateFormatter fields"""
# Note: the last couple of %%s are to check multiple %s are handled
# properly; %% should get replaced by %.
formatter = mdates.DateFormatter("%w %d %m %y %Y %H %I %M %S %%%f %%x")
# Compute date fields without using datetime.strftime,
# since datetime.strftime does not work before year 1900
formatted_date_str = (
"{weekday} {day:02d} {month:02d} {year:02d} {full_year:04d} "
"{hour24:02d} {hour12:02d} {minute:02d} {second:02d} "
"%{microsecond:06d} %x"
.format(
# weeknum=dt.isocalendar()[1], # %U/%W {weeknum:02d}
# %w Sunday=0, weekday() Monday=0
weekday=str((dt.weekday() + 1) % 7),
day=dt.day,
month=dt.month,
year=dt.year % 100,
full_year=dt.year,
hour24=dt.hour,
hour12=((dt.hour-1) % 12) + 1,
minute=dt.minute,
second=dt.second,
microsecond=dt.microsecond))
assert_equal(formatter.strftime(dt), formatted_date_str)
try:
# Test strftime("%x") with the current locale.
import locale # Might not exist on some platforms, such as Windows
locale_formatter = mdates.DateFormatter("%x")
locale_d_fmt = locale.nl_langinfo(locale.D_FMT)
expanded_formatter = mdates.DateFormatter(locale_d_fmt)
assert_equal(locale_formatter.strftime(dt),
expanded_formatter.strftime(dt))
except (ImportError, AttributeError):
pass
for year in range(1, 3000, 71):
# Iterate through random set of years
test_strftime_fields(datetime.datetime(year, 1, 1))
test_strftime_fields(datetime.datetime(year, 2, 3, 4, 5, 6, 12345))
def test_date_formatter_callable():
scale = -11
locator = mock.Mock(_get_unit=mock.Mock(return_value=scale))
callable_formatting_function = (lambda dates, _:
[dt.strftime('%d-%m//%Y') for dt in dates])
formatter = mdates.AutoDateFormatter(locator)
formatter.scaled[-10] = callable_formatting_function
assert_equal(formatter([datetime.datetime(2014, 12, 25)]),
['25-12//2014'])
def test_drange():
"""
This test should check if drange works as expected, and if all the
rounding errors are fixed
"""
start = datetime.datetime(2011, 1, 1, tzinfo=mdates.UTC)
end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)
delta = datetime.timedelta(hours=1)
# We expect 24 values in drange(start, end, delta), because drange returns
    # dates from a half-open interval [start, end)
assert_equal(24, len(mdates.drange(start, end, delta)))
# if end is a little bit later, we expect the range to contain one element
# more
end = end + datetime.timedelta(microseconds=1)
assert_equal(25, len(mdates.drange(start, end, delta)))
# reset end
end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)
    # and test drange with "complicated" floats:
    # 4 hours = 1/6 day, which is a "dangerous" float
delta = datetime.timedelta(hours=4)
daterange = mdates.drange(start, end, delta)
assert_equal(6, len(daterange))
assert_equal(mdates.num2date(daterange[-1]), end - delta)
@cleanup
def test_empty_date_with_year_formatter():
# exposes sf bug 2861426:
# https://sourceforge.net/tracker/?func=detail&aid=2861426&group_id=80706&atid=560720
    # update: I no longer believe this is a bug, as I commented on
# the tracker. The question is now: what to do with this test
import matplotlib.dates as dates
fig = plt.figure()
ax = fig.add_subplot(111)
yearFmt = dates.DateFormatter('%Y')
ax.xaxis.set_major_formatter(yearFmt)
with tempfile.TemporaryFile() as fh:
assert_raises(ValueError, fig.savefig, fh)
def test_auto_date_locator():
def _create_auto_date_locator(date1, date2):
locator = mdates.AutoDateLocator()
locator.create_dummy_axis()
locator.set_view_interval(mdates.date2num(date1),
mdates.date2num(date2))
return locator
d1 = datetime.datetime(1990, 1, 1)
results = ([datetime.timedelta(weeks=52 * 200),
['1990-01-01 00:00:00+00:00', '2010-01-01 00:00:00+00:00',
'2030-01-01 00:00:00+00:00', '2050-01-01 00:00:00+00:00',
'2070-01-01 00:00:00+00:00', '2090-01-01 00:00:00+00:00',
'2110-01-01 00:00:00+00:00', '2130-01-01 00:00:00+00:00',
'2150-01-01 00:00:00+00:00', '2170-01-01 00:00:00+00:00']
],
[datetime.timedelta(weeks=52),
['1990-01-01 00:00:00+00:00', '1990-02-01 00:00:00+00:00',
'1990-03-01 00:00:00+00:00', '1990-04-01 00:00:00+00:00',
'1990-05-01 00:00:00+00:00', '1990-06-01 00:00:00+00:00',
'1990-07-01 00:00:00+00:00', '1990-08-01 00:00:00+00:00',
'1990-09-01 00:00:00+00:00', '1990-10-01 00:00:00+00:00',
'1990-11-01 00:00:00+00:00', '1990-12-01 00:00:00+00:00']
],
[datetime.timedelta(days=141),
['1990-01-05 00:00:00+00:00', '1990-01-26 00:00:00+00:00',
'1990-02-16 00:00:00+00:00', '1990-03-09 00:00:00+00:00',
'1990-03-30 00:00:00+00:00', '1990-04-20 00:00:00+00:00',
'1990-05-11 00:00:00+00:00']
],
[datetime.timedelta(days=40),
['1990-01-03 00:00:00+00:00', '1990-01-10 00:00:00+00:00',
'1990-01-17 00:00:00+00:00', '1990-01-24 00:00:00+00:00',
'1990-01-31 00:00:00+00:00', '1990-02-07 00:00:00+00:00']
],
[datetime.timedelta(hours=40),
['1990-01-01 00:00:00+00:00', '1990-01-01 04:00:00+00:00',
'1990-01-01 08:00:00+00:00', '1990-01-01 12:00:00+00:00',
'1990-01-01 16:00:00+00:00', '1990-01-01 20:00:00+00:00',
'1990-01-02 00:00:00+00:00', '1990-01-02 04:00:00+00:00',
'1990-01-02 08:00:00+00:00', '1990-01-02 12:00:00+00:00',
'1990-01-02 16:00:00+00:00']
],
[datetime.timedelta(minutes=20),
['1990-01-01 00:00:00+00:00', '1990-01-01 00:05:00+00:00',
'1990-01-01 00:10:00+00:00', '1990-01-01 00:15:00+00:00',
'1990-01-01 00:20:00+00:00']
],
[datetime.timedelta(seconds=40),
['1990-01-01 00:00:00+00:00', '1990-01-01 00:00:05+00:00',
'1990-01-01 00:00:10+00:00', '1990-01-01 00:00:15+00:00',
'1990-01-01 00:00:20+00:00', '1990-01-01 00:00:25+00:00',
'1990-01-01 00:00:30+00:00', '1990-01-01 00:00:35+00:00',
'1990-01-01 00:00:40+00:00']
],
[datetime.timedelta(microseconds=1500),
['1989-12-31 23:59:59.999507+00:00',
'1990-01-01 00:00:00+00:00',
'1990-01-01 00:00:00.000502+00:00',
'1990-01-01 00:00:00.001005+00:00',
'1990-01-01 00:00:00.001508+00:00']
],
)
for t_delta, expected in results:
d2 = d1 + t_delta
locator = _create_auto_date_locator(d1, d2)
assert_equal(list(map(str, mdates.num2date(locator()))),
expected)
@image_comparison(baseline_images=['date_inverted_limit'],
extensions=['png'])
def test_date_inverted_limit():
# test ax hline with date inputs
t0 = datetime.datetime(2009, 1, 20)
tf = datetime.datetime(2009, 1, 31)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axhline(t0, color="blue", lw=3)
ax.set_ylim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
ax.invert_yaxis()
fig.subplots_adjust(left=0.25)
def _test_date2num_dst(date_range, tz_convert):
# Timezones
BRUSSELS = pytz.timezone('Europe/Brussels')
UTC = pytz.UTC
# Create a list of timezone-aware datetime objects in UTC
# Interval is 0b0.0000011 days, to prevent float rounding issues
dtstart = datetime.datetime(2014, 3, 30, 0, 0, tzinfo=UTC)
interval = datetime.timedelta(minutes=33, seconds=45)
interval_days = 0.0234375 # 2025 / 86400 seconds
N = 8
dt_utc = date_range(start=dtstart, freq=interval, periods=N)
dt_bxl = tz_convert(dt_utc, BRUSSELS)
expected_ordinalf = [735322.0 + (i * interval_days) for i in range(N)]
actual_ordinalf = list(mdates.date2num(dt_bxl))
assert_equal(actual_ordinalf, expected_ordinalf)
def test_date2num_dst():
    # Test for github issue #3896: date2num around DST transitions,
    # exercised with a mock timezone-aware datetime that mimics pandas behaviour.
class dt_tzaware(datetime.datetime):
"""
This bug specifically occurs because of the normalization behavior of
pandas Timestamp objects, so in order to replicate it, we need a
datetime-like object that applies timezone normalization after
subtraction.
"""
def __sub__(self, other):
r = super(dt_tzaware, self).__sub__(other)
tzinfo = getattr(r, 'tzinfo', None)
if tzinfo is not None:
localizer = getattr(tzinfo, 'normalize', None)
if localizer is not None:
r = tzinfo.normalize(r)
if isinstance(r, datetime.datetime):
r = self.mk_tzaware(r)
return r
def __add__(self, other):
return self.mk_tzaware(super(dt_tzaware, self).__add__(other))
def astimezone(self, tzinfo):
dt = super(dt_tzaware, self).astimezone(tzinfo)
return self.mk_tzaware(dt)
@classmethod
def mk_tzaware(cls, datetime_obj):
kwargs = {}
attrs = ('year',
'month',
'day',
'hour',
'minute',
'second',
'microsecond',
'tzinfo')
for attr in attrs:
val = getattr(datetime_obj, attr, None)
if val is not None:
kwargs[attr] = val
return cls(**kwargs)
# Define a date_range function similar to pandas.date_range
def date_range(start, freq, periods):
dtstart = dt_tzaware.mk_tzaware(start)
return [dtstart + (i * freq) for i in range(periods)]
# Define a tz_convert function that converts a list to a new time zone.
def tz_convert(dt_list, tzinfo):
return [d.astimezone(tzinfo) for d in dt_list]
_test_date2num_dst(date_range, tz_convert)
def test_date2num_dst_pandas():
# Test for github issue #3896, but in date2num around DST transitions
# with a timezone-aware pandas date_range object.
try:
import pandas as pd
except ImportError:
raise SkipTest('pandas not installed')
def tz_convert(*args):
return pd.DatetimeIndex.tz_convert(*args).astype(datetime.datetime)
_test_date2num_dst(pd.date_range, tz_convert)
def test_DayLocator():
assert_raises(ValueError, mdates.DayLocator, interval=-1)
assert_raises(ValueError, mdates.DayLocator, interval=-1.5)
assert_raises(ValueError, mdates.DayLocator, interval=0)
assert_raises(ValueError, mdates.DayLocator, interval=1.3)
mdates.DayLocator(interval=1.0)
def test_tz_utc():
dt = datetime.datetime(1970, 1, 1, tzinfo=mdates.UTC)
dt.tzname()
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
gpl-3.0
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/matplotlib/projections/polar.py
|
1
|
26790
|
import math
import warnings
import numpy as np
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
import matplotlib.axis as maxis
from matplotlib import cbook
from matplotlib import docstring
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, FormatStrFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper, \
ScaledTranslation, blended_transform_factory, BboxTransformToMaxOnly
import matplotlib.spines as mspines
class PolarTransform(Transform):
"""
    The base polar transform. This handles the projection of *theta* and
    *r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform_non_affine(self, tr):
xy = np.empty(tr.shape, np.float_)
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
t *= theta_direction
t += theta_offset
r = r - rmin
mask = r < 0
x[:] = np.where(mask, np.nan, r * np.cos(t))
y[:] = np.where(mask, np.nan, r * np.sin(t))
return xy
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return Path(self.transform(vertices), path.codes)
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class PolarAffine(Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
def __init__(self, scale_transform, limits):
"""
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2pi).
"""
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
yscale = limits_scaled.ymax - limits_scaled.ymin
affine = Affine2D() \
.scale(0.5 / yscale) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def __getstate__(self):
return {}
class InvertedPolarTransform(Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform_non_affine(self, xy):
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
x = xy[:, 0:1]
y = xy[:, 1:]
r = np.sqrt(x*x + y*y)
theta = np.arccos(x / r)
theta = np.where(y < 0, 2 * np.pi - theta, theta)
theta -= theta_offset
theta *= theta_direction
r += rmin
return np.concatenate((theta, r), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class ThetaFormatter(Formatter):
"""
Used to format the *theta* tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % ((x / np.pi) * 180.0)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
return "%0.0f\u00b0" % ((x / np.pi) * 180.0)
class RadialLocator(Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
    depending on the scale of the *r*-axis).
"""
def __init__(self, base):
self.base = base
def __call__(self):
ticks = self.base()
return [x for x in ticks if x > 0]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
def view_limits(self, vmin, vmax):
vmin, vmax = self.base.view_limits(vmin, vmax)
return 0, vmax
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
def __init__(self, *args, **kwargs):
"""
Create a new Polar Axes for a polar plot.
The following optional kwargs are supported:
- *resolution*: The number of points of interpolation between
each pair of data points. Set to 1 to disable
interpolation.
"""
self.resolution = kwargs.pop('resolution', 1)
self._default_theta_offset = kwargs.pop('theta_offset', 0)
self._default_theta_direction = kwargs.pop('theta_direction', 1)
if self.resolution not in (None, 1):
warnings.warn(
"""The resolution kwarg to Polar plots is now ignored.
If you need to interpolate data points, consider running
cbook.simple_linear_interpolation on the data before passing to matplotlib.""")
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
self.xaxis.set_major_formatter(self.ThetaFormatter())
self.xaxis.isDefault_majfmt = True
angles = np.arange(0.0, 360.0, 45.0)
self.set_thetagrids(angles)
self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator()))
self.grid(rcParams['polaraxes.grid'])
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.set_theta_offset(self._default_theta_offset)
self.set_theta_direction(self._default_theta_direction)
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
# Calling polar_axes.xaxis.cla() or polar_axes.xaxis.cla()
# results in weird artifacts. Therefore we disable this for
# now.
# self.spines['polar'].register_axis(self.yaxis)
self._update_transScale()
def _set_lim_and_transforms(self):
self.transAxes = BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = TransformWrapper(IdentityTransform())
# A (possibly non-linear) projection on the (already scaled)
# data. This one is aware of rmin
self.transProjection = self.PolarTransform(self)
# This one is not aware of rmin
self.transPureProjection = self.PolarTransform(self, use_rmin=False)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = self.transScale + self.transProjection + \
(self.transProjectionAffine + self.transAxes)
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 1.0 at
# the edge of the axis circle.
self._xaxis_transform = (
self.transPureProjection +
self.PolarAffine(IdentityTransform(), Bbox.unit()) +
self.transAxes)
# The theta labels are moved from radius == 0.0 to radius == 1.1
self._theta_label1_position = Affine2D().translate(0.0, 1.1)
self._xaxis_text1_transform = (
self._theta_label1_position +
self._xaxis_transform)
self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1)
self._xaxis_text2_transform = (
self._theta_label2_position +
self._xaxis_transform)
# This is the transform for r-axis ticks. It scales the theta
        # axis so that the gridlines, which run from 0.0 to 1.0, now go
        # from 0.0 to 2pi.
self._yaxis_transform = (
Affine2D().scale(np.pi * 2.0, 1.0) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label_position = ScaledTranslation(
22.5, 0.0, Affine2D())
self._yaxis_text_transform = (
self._r_label_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
def get_xaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'bottom', 'left'
elif angle < 180.:
return self._yaxis_text_transform, 'bottom', 'right'
elif angle < 270.:
return self._yaxis_text_transform, 'top', 'right'
else:
return self._yaxis_text_transform, 'top', 'left'
def get_yaxis_text2_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'top', 'right'
elif angle < 180.:
return self._yaxis_text_transform, 'top', 'left'
elif angle < 270.:
return self._yaxis_text_transform, 'bottom', 'left'
else:
return self._yaxis_text_transform, 'bottom', 'right'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'polar':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
def get_rmax(self):
return self.viewLim.ymax
def set_rmin(self, rmin):
self.viewLim.y0 = rmin
def get_rmin(self):
return self.viewLim.ymin
def set_theta_offset(self, offset):
"""
Set the offset for the location of 0 in radians.
"""
self._theta_offset = offset
def get_theta_offset(self):
"""
Get the offset for the location of 0 in radians.
"""
return self._theta_offset
def set_theta_zero_location(self, loc):
"""
Sets the location of theta's zero. (Calls set_theta_offset
with the correct value in radians under the hood.)
May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".
"""
mapping = {
'N': np.pi * 0.5,
'NW': np.pi * 0.75,
'W': np.pi,
'SW': np.pi * 1.25,
'S': np.pi * 1.5,
'SE': np.pi * 1.75,
'E': 0,
'NE': np.pi * 0.25 }
return self.set_theta_offset(mapping[loc])
def set_theta_direction(self, direction):
"""
Set the direction in which theta increases.
clockwise, -1:
Theta increases in the clockwise direction
counterclockwise, anticlockwise, 1:
Theta increases in the counterclockwise direction
"""
if direction in ('clockwise',):
self._direction = -1
elif direction in ('counterclockwise', 'anticlockwise'):
self._direction = 1
elif direction in (1, -1):
self._direction = direction
else:
raise ValueError("direction must be 1, -1, clockwise or counterclockwise")
def get_theta_direction(self):
"""
Get the direction in which theta increases.
-1:
Theta increases in the clockwise direction
1:
Theta increases in the counterclockwise direction
"""
return self._direction
def set_rlim(self, *args, **kwargs):
if 'rmin' in kwargs:
kwargs['ymin'] = kwargs.pop('rmin')
if 'rmax' in kwargs:
kwargs['ymax'] = kwargs.pop('rmax')
return self.set_ylim(*args, **kwargs)
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator()))
def set_rscale(self, *args, **kwargs):
return Axes.set_yscale(self, *args, **kwargs)
def set_rticks(self, *args, **kwargs):
return Axes.set_yticks(self, *args, **kwargs)
@docstring.dedent_interpd
def set_thetagrids(self, angles, labels=None, frac=None, fmt=None,
**kwargs):
"""
Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
*frac* is the fraction of the polar axes radius at which to
place the label (1 is the edge). e.g., 1.05 is outside the axes
and 0.95 is inside the axes.
Return value is a list of tuples (*line*, *label*), where
        *line* is a :class:`~matplotlib.lines.Line2D` instance and
        *label* is a :class:`~matplotlib.text.Text` instance.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
# Make sure we take into account unitized data
angles = self.convert_yunits(angles)
angles = np.asarray(angles, np.float_)
self.set_xticks(angles * (np.pi / 180.0))
if labels is not None:
self.set_xticklabels(labels)
elif fmt is not None:
self.xaxis.set_major_formatter(FormatStrFormatter(fmt))
if frac is not None:
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, 1.0 / frac)
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
@docstring.dedent_interpd
def set_rgrids(self, radii, labels=None, angle=None, fmt=None,
**kwargs):
"""
Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
Return value is a list of tuples (*line*, *label*), where
        *line* is a :class:`~matplotlib.lines.Line2D` instance and
        *label* is a :class:`~matplotlib.text.Text` instance.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
# Make sure we take into account unitized data
radii = self.convert_xunits(radii)
radii = np.asarray(radii)
rmin = radii.min()
if rmin <= 0:
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
elif fmt is not None:
self.yaxis.set_major_formatter(FormatStrFormatter(fmt))
if angle is None:
angle = self._r_label_position.to_values()[4]
self._r_label_position._t = (angle, 0.0)
self._r_label_position.invalidate()
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
raise NotImplementedError("You can not set the xscale on a polar plot.")
def set_xlim(self, *args, **kargs):
# The xlim is fixed, no matter what you do
self.viewLim.intervalx = (0.0, np.pi * 2.0)
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
theta /= math.pi
# \u03b8: lower-case theta
# \u03c0: lower-case pi
# \u00b0: degree symbol
return '\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
Polar axes do not support zoom boxes.
"""
return False
def can_pan(self) :
"""
Return *True* if this axes supports the pan/zoom button functionality.
For polar axes, this is slightly misleading. Both panning and
zooming are performed by the same button. Panning is performed
in azimuth while zooming is done along the radial.
"""
return True
def start_pan(self, x, y, button):
angle = np.deg2rad(self._r_label_position.to_values()[4])
mode = ''
if button == 1:
epsilon = np.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if t >= angle - epsilon and t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = cbook.Bunch(
rmax = self.get_rmax(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
r_label_angle = self._r_label_position.to_values()[4],
x = x,
y = y,
mode = mode
)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
if abs(dt1) < abs(dt0):
                dt = abs(dt1) * np.sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / np.pi) * 180.0
self._r_label_position._t = (p.r_label_angle - dt, 0.0)
self._r_label_position.invalidate()
trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0)
trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0)
for t in self.yaxis.majorTicks + self.yaxis.minorTicks:
t.label1.set_va(vert1)
t.label1.set_ha(horiz1)
t.label2.set_va(vert2)
t.label2.set_ha(horiz2)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
dr = r - startr
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# to keep things all self contained, we can put aliases to the Polar classes
# defined above. This isn't strictly necessary, but it makes some of the
# code more readable (and provides a backwards compatible Polar API)
PolarAxes.PolarTransform = PolarTransform
PolarAxes.PolarAffine = PolarAffine
PolarAxes.InvertedPolarTransform = InvertedPolarTransform
PolarAxes.ThetaFormatter = ThetaFormatter
PolarAxes.RadialLocator = RadialLocator
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print vertices[-2:]
# print result[-2:]
# return mpath.Path(result, codes)
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# print "interpolate", interpolate
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# tkappa = np.arctan(kappa)
# hyp_kappa = np.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (np.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / np.cos(tkappa * td_scaled) # np.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / np.cos(tkappa * td_scaled) # np.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
|
gpl-3.0
|
appapantula/scikit-learn
|
examples/model_selection/plot_underfitting_overfitting.py
|
230
|
2649
|
"""
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate **overfitting** / **underfitting** quantitatively by using
cross-validation: we calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model is to generalize correctly
from the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
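# Editor's note (illustrative, not part of the original example): PolynomialFeatures
# with include_bias=False maps a single feature x to [x, x**2, ..., x**degree], so each
# pipeline below is an ordinary least-squares fit in that expanded feature space, e.g.
# PolynomialFeatures(degree=3, include_bias=False).fit_transform([[2.0]]) gives
# array([[ 2., 4., 8.]]).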
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
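    # Editor's note (not part of the original example): with scoring="mean_squared_error"
    # cross_val_score returns the negated MSE (so that greater is always better), which
    # is why the title below displays -scores.mean().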
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
|
bsd-3-clause
|
dgary50/eovsa
|
QTschedule.py
|
1
|
137567
|
#!/usr/bin/env python
'''
Main application for the EOVSA Schedule, which sends commands to the
Array Control Computer according to a timed schedule and runs various
routines locally to generate source coordinates, track tables, uvw
and delay information. This version uses the Qt GUI.'''
# ____ _ _ _
# / ___| ___| |__ ___ __| |_ _ | | ___
# \___ \/ __| '_ \/ _ \/ _` | | | || |/ _ \
# ___) |(__| | | | __/ (_| | |_| || | __/
# |____/\___|_| |_\___|\__,_|\__,_||_|\___|
#
# History:
# 2014-Nov-15 DG
# Started this history log. Added $TRIPS command and added antenna
# diagnostics update every 5 minutes. Extended height of window
# (controlled by Macro list box) to 15 lines.
# 2014-Nov-17 DG
# Changed antenna diagnostics update to occur at half-minute mark
# of 5 minute updates, to avoid collision with other commands.
# 2014-Nov-28 DG
# Fixed the default ROACH channel assignments 'Antlist' in the scan_header
# to be [1,2,3,4,0,0,0,0,0,0,0,0,0,0,0,0]. Also, no longer flag antennas that
# are not in 'Antlist' as not tracking.
# 2014-Nov-29 DG
# Added information to sh_dict['project'] identifying the type of
# observation, mainly to allow finding of SOLPNTCAL right now, but
# likely to be of general use. Also improved visibility of errors
# when writing to the SQL Server. Had to put 'Antlist' back to (wrong)
# original value due to bug in Fortran software--for now...
# 2014-Dec-07 DG
# Add default project/source ID in cases where none is specifically
# defined/hard-coded. Note that default Track Mode is 'FIXED' in this
# case. Also tried to get $SCAN-START to work as a raw command.
# 2014-Dec-08 DG
# Finally fixed problem with scan_state getting set to -1. It was
# happening in get_uvw() when srcname was None. It should be possible
# (e.g. for total power scans) to take data with no source name... Hmm,
# still not working. Also changed default observation Project from
# "Normal Observing" to "NormalObserving".
# 2015-Jan-19 DG
# Attempt to run 4 ROACH boards (2 separate systems)
# Updated: antlist, roach_ips
# 2015-Feb-11 DG
# Implement $DLASWEEP command, which sweeps delay one per second
# on given antenna from a start value to a stop value relative to
# current delay.
# 2015-Feb-14 DG
# Rather major change to interpret a "Raw Command" just like any
# other atomic command in a .ctl file. This should make all
# legitimate atomic commands, even those interpreted by the schedule
# (that start with $) runnable as a "Raw Command."
# 2015-Feb-15 DG
# Several changes to get the code working for Geosats again.
# 2015-Mar-02 DG
# Change the 5000 step delay offset to 5000.5 to make astype(int)
# act as a round().
# 2015-Mar-29 DG
# On starting a new scan, reads DCM_master_table.txt and creates
# dcm.txt according to the frequency sequence, then transfers it to
# the ACC.
# 2015-Apr-02 JV
# Modifications to enable running a second subarray: use commands
# > python schedule.py Subarray2
# for OVSA, or
# > python schedule.py Starburst
# for Starburst. In either case, a master schedule (Subarray1), initiated
# by running schedule.py without args, must be running, because only the
# master schedule updates the antenna diagnostics in the ACC stateframe,
# and only the master schedule writes stateframe data to the SQL database.
# The second subarray schedule writes stateframe data to the ACC using a
# different port than the master schedule.
# 2015-Apr-03 JV
# - Modified App.get_subarray_pid() (used to check if a schedule with same
# name is already running) to be case insensitive.
# - Added a '$SUBARRAY' command to execute_ctlline() which runs SUBARRAY1
# when run by schedule 1 and SUBARRAY2 when run by schedule 2. $SUBARRAY
# command can be invoked in two ways (shown using examples):
# $SUBARRAY ant1-8 ant15
# $SUBARRAY default.antlist starburst
# where the 3rd argument in the 2nd example is the name of an antlist specified
# in the file default.antlist.
# - Wrote function get_antlist(antlistname,antlistfile) to read antlistfile and
# return the antlist defined for name antlistname. This way we can modify
# default.antlist (or other antlistfile) instead of modifying all the .ctl files
# when we want to move ants in and out of the array.
# 2015-Apr-03 DG
# Add s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) ahead of s.connect()
# in places where it was missing.
# 2015-May-29 DG
# Converted from using datime() to using Time() based on astropy.
# 2015-Jun-12 DG
# Added command history code written by Rob Gelosa
# 2015-Jun-16 DG
# FTP to ACC now requires a username and password
# 2015-Jun-19 DG
# Now gets ROACH antenna assignments from eovsa_corr.ini, so that this
# can be changed in a single place and propagate correctly to the rest
# of the system.
# 2015-Jun-25 DG
# Now that Ant13's solar power station is online, changed rd_solpwr() to
# read from either power station depending on supplied url.
# 2015-Jun-26 DG
# Finally found and fixed the bug that was preventing automatic update of
# choice of calibrators.
# 2015-Jul-07 DG
# Added subbw for 400 MHz to scan_header (not used in DPP?)
# 2015-Jul-25 DG
# Changes to allow for different X and Y delay centers in file
# acc:/parm/delay_centers.txt. Added dlaceny to sh_dict.
# 2015-Jul-27 DG
# Added optional polarization to $DLASWEEP command (X, Y, or omitted=>both)
# 2015-Jul-29 DG
# I found that the DCMTABLE command was not being issued to send DCM.TXT
# to the ACC. This is now automatically done just before any DCMAUTO-ON
# command is sent.
# 2015-Aug-09 DG
# Added provision to sweep all delays in $DLASWEEP command if ant = 0 is
# specified. This is useful for total power polarization measurements,
# if only X or Y is swept.
# 2015-Aug-17 DG
# Changed to allow setting different adc clock frequency in one place
# [in connect2roach(), i.e. self.brd_clk_freq]
# 2015-Oct-13 DG
# Added $LNA-INIT command to read ACC file LNA_settings.txt and send the
# corresponding commands to the ACC.
# 2015-Oct-22 DG
# Finally had a chance to debug $LNA-INIT command. I ended up having to
# create a new procedure sendctlline() that creates the socket, sends, and
# closes the socket for each line. Then it was possible to send the
# multiple lines needed for $LNA-INIT.
# 2015-Oct-24 DG
# Added PLANET macro command to track a PLANET other than the Sun.
# 2015-Oct-27 DG
# Expand to 6 ROACHes (three pairs) [change to antlist, and to list of roach IPs]
# --one ROACH (#6) seems messed up, so going back to 4 ROACHes for now.
# 2015-Nov-29 DG
# Updated $LNA-INIT command to use new command names and syntax.
# 2016-Jan-16 DG
# Added code for $PCYCLE command, to power-cycle a device in the field
# (antenna, crio, or fem) using the new Viking relay controllers
# 2016-Jan-19 DG
# Added attempt to read pwr_cycle queue
# 2016-Feb-27 DG
# This time I really did add sending of DCM.TXT before DCMAUTO-ON
# 2016-Feb-28 DG
# Changed printing of elapsed time to print only if more than 10 ms
# early or late (should mean shorter log file...). Also fixed a not
# very important bug in dlasweep.
# 2016-Feb-29 DG
# Added $SCAN-RESTART command, to just turn on the scan state
# 2016-Mar-08 DG
# Changed delay offset from 5000 to 8000, to reflect new range of
# 16-ant correlator (0-16000 steps).
# 2016-Mar-15 JRV
# Added function check_27m_sun, which is called once per second (by
# inc_time) to make sure 27-m's do not go w/in 10 degrees of Sun
# 2016-Mar-17 DG
# Changed delay offset from 8000 to 9000, to reflect new range of
# 16-ant correlator (0-16000 steps).
# 2016-Mar-30 DG
# I discovered that I was writing the wrong polarizations.
# 2016-May-04 DG
# Implemented writing of DCM table to sql server as well as to ACC.
# Also added $FEM-INIT command to reset the FEM attenuations to
# their optimal value (for power level 3 dBm)
# 2016-May-20 DG
# Starting a new scan now takes much longer (not sure why), so changed
# the wake_up() timer from 10 s to 15 s
# 2016-Aug-17 DG
# Change to connect2roach() to minimize number of FTP accesses to
# files on ACC.
# 2016-Sep-07 DG
# Aborted the preceding, since it never worked, and the ACC is fixed...
# 2016-Oct-26 DG
# Added $CAPTURE-1S command, to capture 1 s of data packets on dpp.
# Data recording must first be stopped, using $SCAN-STOP.
# 2016-Nov-04 DG
# Add $SCAN-START NODATA option, so that a scan is set up for running
# but no data is recorded (needed prior to $CAPTURE-1S).
# 2016-Nov-15 DG
# Implement two new commands, $PA-TRACK and $PA-STOP, to initiate and
# abort automatic tracking of the position angle setting of the 27-m
# focus rotation mechanism. $PA-TRACK requires an antenna number,
# which is the antenna from which to read the value of parallactic angle.
# 2016-Nov-22 DG
# Changed the code to read delay centers from the database at the
# start of each scan (so code does not have to be reloaded every time).
# Note that it reads from the SQL database, not the ACC file
# /parm/delay_centers.txt, so proper procedures are needed to
# ensure that these are the same! The dppxmp program uses that file.
# 2016-Nov-25 DG
# Delays on ant 13 are still going negative. Change Ant1 delay to
# 11000, to counteract it. I hope this does not drive other delays
# past 16000
# 2016-Nov-26 DG
# Added CALPNTCAL command handling, to do 27-m pointing measurement on
# calibrators.
# 2016-Dec-10 DG
# Added $PA-SWEEP command, which rotates the FRM from -PA to PA at
# a given rate. Uses the same PA_thread variable so that $PA-STOP
# works to stop it, and it cannot run if $PA-TRACK is running (and
# vice versa). Also fixed a number of problems where I was referring
# to np instead of numpy--there is no np in this module!
# 2016-Dec-12 DG
# Changed autogen() to expect the first line to be an ACQUIRE line,
# second to be a PHASECAL, and third to be SUN.
# 2016-Dec-18 DG
# Discovered that there is no difference between make_tracktable and
# make_geosattable, so I eliminated the latter. Also changed split(' ')
# to just split() everywhere, to remove pointless requirement of only
# one space between elements of schedule commands.
# 2017-Jan-05 DG
# It turns out that there were some subtle but important differences
# in make_geosattable(), so I reinstated it.
# 2017-Jan-06 DG
# Discovered that X and Y delays were being set independently, which
# made the difference in delay between X and Y change by 1 step. This
# unwanted behavior explains the random changes in X vs. Y (and XX vs. YY)
# delay that we see in the data. Now only X controls when delays are
# changed, and X and Y are changed together.
# 2017-Feb-06 DG
# Major change to set all source coordinates as J2000 TOPOCENTRIC. This
# means setting the EPOCH string in the scan_header to '2000', and converting
# RA and Dec coordinates from TOPOCENTRIC of DATE as follows:
# ra_j2000 = (src.ra - src.g_ra) + src.a_ra
# dec_j2000 = (src.dec - src.g_dec) + src.a_dec
# where src.ra and src.dec are the old topocentric coordinates of date,
# src.g_ra, src.g_dec are the geocentric coordinates of date, and
# src.a_ra, src.a_dec are the astrometric (J2000, geocentric) coordinates.
# 2017-Feb-08 DG
# The above strategy did not work. I determined that the call to check the 27-m
# position was calling aa.set_jultime(), which has a side-effect of setting the
# epoch to date. I replaced that with a change to aa.date. I also verified
# that uvw coordinates are calculated correctly with the current scheme. Still
# not good enough. The epoch is getting reset somewhere else. I finally gave
# up and just set the epoch to J2000 once per second!
# 2017-Mar-05 DG
# The 300 MHz correlator is working! However, that led me to discover that
# a 200 MHz board clock speed was hard-coded here. I have changed it to
# check the ADC clock speed in the ROACH config file (attached to the
# roach objects when connecting to the ROACHes), and set it to 1/4 of that.
# If for some reason that fails, the clock speed defaults to 300 MHz. I
# also increased the delay offset to 16000. Also added reading of new ACC
# file with the ROACH sync time, in init_scanheader_dict(), which replaces
# the erroneous value we have been using in the scan_header. I also changed
# time reference time of the uvw to correspond to the current second.
# 2017-Mar-06 DG
# Changed delay offset to scale with adc_clk frequency, so that it works for
# either 200 or 300 MHz design.
# 2017-Apr-22 DG
# Updated $CAPTURE-1S handling to allow it to work when no <stem> argument is given.
# 2017-May-18 DG
# Commented out lines relating STARBURST
# 2017-Jun-28 DG
# Added "crossed" keyword handling for $PA_ADJUST command
# 2017-Jul-08 DG
# Changes to allow detection of Ant 14 receiver position (based on stateframe
# information from FEMA, and remembered via self.lorx), and setting of Ant 14
# delays based on that, to those in slot for Ant 15.
# 2017-Jul-10 DG
# After some confusing results, I realized that the ACC delay_centers.txt
# file also had to be changed, because the DPP is using that. Therefore,
# the whole scheme was updated to read from SQL at a $SCAN-START, do the
# swap at that point, if necessary, and then create and FTP the file. This
# solves another problem that the ACC file could in principle deviate from
# the SQL table. Now it cannot (if all goes smoothly).
# 2017-Sep-01 DG
# Added update_status() to create a status file with the currently running
# schedule.
# 2018-Apr-09 DG
# My earlier change of split(' ') to just split() everywhere seems to have
# vanished, so I reedited this change.
# 2018-Jun-08 DG
# Added a distinct PHASECAL_LO command, in order to selectively change the
# receiver commands for a scan using the low-frequency receiver. At the
# moment, a different set of DCM attenuations are needed for that receiver,
# but this ability is probably a good idea in general.
# 2018-Sep-18 DG
# Added new functionality for New menu item! Automatically make today's
# solar schedule.
# 2018-Oct-24 DG
# Rearrange code to check if another schedule is running BEFORE opening a
# new schedule.log file. This avoids overwriting the schedule.log file of
# a running schedule.
# 2018-Dec-26 DG
# First attempt to make the schedule work using the QT Gui
#
import os, signal
os.chdir('/home/sched/Dropbox/PythonCode/Current')
from PyQt4 import QtGui, QtCore
#from Tkinter import *
#import ttk
#from tkMessageBox import *
#from tkFileDialog import *
from ftplib import FTP
import urllib2
import util
import threading, pwr_cycle
import subprocess
import roach
from eovsa_tracktable import *
from eovsa_array import *
from eovsa_lst import eovsa_ha
from math import pi
from readvla import *
from scan_header import *
from gen_schedule_sf import *
import stateframe, stateframedef
import aipy.amp
from aipy.phs import PointingError
import corr, time, numpy, socket, struct, sys
import ephem
import eovsa_cat
from eovsa_visibility import scan_visible
from whenup import whenup
#import starburst
from matplotlib.mlab import find
import cal_header
import adc_cal2
import pcapture2
from whenup import make_sched
# Determine whether this is the master schedule (Subarray1) or controlling a second subarray
# To run the master schedule, just type > python schedule.py
# or > python schedule.py Subarray1
# To control a second subarray, type > python schedule.py Subarray2
# or > python schedule.py Starburst
if len(sys.argv) < 2: # master schedule (Subarray1)
subarray_name = 'Subarray1'
else:
subarray_name = sys.argv[1] # this option is for a 2nd OVSA subarray - Subarray2 is suggested name,
# but any name should work other than Starburst or Subarray1
mypid = str(os.getpid())
#============================
def get_subarray_pid(subarray_name):
# check whether a subarray with subarray_name ('Subarray1' for master, or 'Subarray2' or 'Starburst')
# is running in another instance of schedule.py (case-insensitive).
# return PID if it is running, -1 if it is not
pidlist = subprocess.check_output(["pidof","python"]).split() # list of PIDs for all python processes
for pid in pidlist:
if mypid != pid:
ps_out = subprocess.check_output(["ps","-lfp",pid])
ind = ps_out.find('schedule.py') # position in string at which 'schedule.py' can be found (-1 if not found)
if ind != -1: # if this PID is running schedule.py
if subarray_name=='Subarray1': # if we are checking if the master schedule is running (Subarray1)
if len(ps_out)==ind+len('schedule.py\n'): # check if this PID is for a master schedule (no extra args after schedule.py)
return pid
elif ps_out.upper().find(subarray_name.upper()) != -1: # if the name of the subarray we are checking was used for this PID
return pid
return -1
# Check whether a schedule is already running. Only one of each subarray is allowed.
match_pid = get_subarray_pid(subarray_name)
if match_pid != -1:
showerror('Error','Another '+subarray_name+' schedule is already running (PID '+ match_pid + ')')
exit()
# Ensure that output to "terminal" goes to log file.
if len(sys.argv)<2: # for master schedule write to schedule.log
sys.stdout = open('/tmp/schedule.log','w')
else: # use a different log file name for the 2nd subarray (schedule_Subarray2.log or schedule_Starburst.log)
sys.stdout = open('/tmp/schedule_'+sys.argv[1]+'.log','w')
# Show the scanheader dictionary as undefined
global sh_dict, sf_dict
userpass = 'admin:observer@'
sh_dict = {}
sf_dict = {}
#============================
def init_scanheader_dict(version=37.0):
''' Create the initial scan header dictionary, with reasonable defaults.
Entries will be overridden before creating the scan_header file using
scan_header.py
'''
global sh_dict
#userpass = 'admin:observer@'
t = util.Time.now()
mjd_ = t.mjd # Get mjd of Time() object
timestamp = t.lv # Get LabVIEW timestamp
aa = eovsa_array()
aa.date = str(aa.date)[:11]+'00:00' # Set date to 0 UT
#print t.iso,aa.epoch
mjd0 = aa.date + 15019.5 # Convert from ephem date to mjd
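# (PyEphem dates count days from 1899-12-31 12:00 UT, i.e. JD 2415020.0 = MJD 15019.5,
# hence the 15019.5 offset used for ephem <-> MJD conversions throughout this module.)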
try:
f = urllib2.urlopen('ftp://'+userpass+'acc.solar.pvt/parm/acc0time.txt',timeout=1)
mjdacc0 = np.double(f.readline().split()[0])
f.close()
except:
print t.iso,'ACC connection for acc0 time timed out. Reading from /tmp/acc0time.txt'
f = open('/tmp/acc0time.txt','r')
mjdacc0 = np.double(f.readline().split()[0])
f.close()
#try:
## Read delay center file from ACC
#dlafile = urllib2.urlopen('ftp://'+userpass+'acc.solar.pvt/parm/delay_centers.txt',timeout=1)
#dcen = []
#dceny = []
#for line in dlafile.readlines():
#if line[0] != '#':
#ant,dx,dy = numpy.array(line.strip().split()).astype('float')
## Skip comment lines and take second number as delay center [ns]
#dcen.append(dx)
#dceny.append(dy)
#except:
#print t.iso,'ACC connection for delay centers timed out.'
#dcen = [0]*16
try:
xml, buf = cal_header.read_cal(4)
dcenters = stateframe.extract(buf,xml['Delaycen_ns'])
dcen = dcenters[:,0]
dceny = dcenters[:,1]
except:
print t.iso,'SQL connection for delay centers failed.'
dcen = [0]*16
dceny = [0]*16
try:
# Read eovsa_corr.ini file from ACC and get ROACH antenna assignments.
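# Hypothetical example of the entries parsed below:
# antasgn0 = 0,1,2,3
# antasgn1 = 4,5,6,7
# These would be concatenated into '0,1,2,3,4,5,6,7,' and converted to the
# integer array antlist = [0 1 2 3 4 5 6 7].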
inifile = urllib2.urlopen('ftp://'+userpass+'acc.solar.pvt/parm/eovsa_corr.ini',timeout=1)
antasgn = ''
for line in inifile.readlines():
if line.find('antasgn') == 0:
# Keep appending antennas to the string (final will end in ',').
antasgn += line.strip().split('=')[1]+','
# Convert string to numpy array
antlist = numpy.array(antasgn[:-1].split(',')).astype('int')
except:
print t.iso,'ACC connection for eovsa_corr.ini (ROACH antenna assignments) timed out.'
antlist = numpy.arange(16) # Assume [bad assumption!] that antennas are assigned in order
print t.iso, 'Antlist is:', antlist
sh_dict = {'timestamp': timestamp,
'Version': version,
'project':'NormalObserving',
'operator':'Kjell Nelin',
'comments':'None',
'version':['1.0.0','1.0.0'],
'nants':16,
'antlist':antlist,
#'antlist':numpy.array([1,2,3,4,0,0,0,0,0,0,0,0,0,0,0,0]), # 1,2,3,4 for prototype
'antpos': aa,
'pbfwhm': [1.22*30*180*3600./210./pi]*13 + [1.22*30*180*3600./2700./pi]*2 + [0.0],
'mount': [1]*13 + [2]*2 + [0],
'scan_type':'test',
'source_id':'None',
'track_mode':'PLANET',
'epoch':'2000',
'dra': 0.0, 'ddec': 0.0, 'ha': 0.0, 'dha': 0.0,
'sun_info': mjd0, # Solar P-angle, B0-angle, and radius will be calculated for this date
'pol': [-5,-6,-7,-8], # Default XX, YY, XY, YX polarization
'max_file_size':100, # MB, max IDB file size
'intval': 20,
'nintval': 50,
'subbw': 0.4/4096, # Case of 400 MHz IF bandwidth (not used?)
'dlacen': numpy.array(dcen),
'dlaceny': numpy.array(dceny),
'time_at_acc0': Time(mjdacc0,format='mjd'),
'roach_brd_clk': [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],
'katadc':[{},{},{},{},{},{},{},{}]} # Empty dictionaries (one for each ROACH) for katadc sensor data
#============================
def init_sched_dict():
'''Create the dictionary that represents the schedule portion of the
stateframe. These entries are overwritten once per second as long
as the schedule is running.
'''
global sf_dict
t = util.Time.now()
mjd_ = t.mjd # Get mjd of Time() object
timestamp = t.lv # Get LabVIEW timestamp
# Create schedule dictionary
sf_dict = {'timestamp': timestamp,
'timestamp1': timestamp,
'scan_state': -1,
'phase_tracking': False,
'uvw': numpy.array([[0.0,0.0,0.0]]*16),
'uvw1': numpy.array([[0.0,0.0,0.0]]*16),
'delay': numpy.zeros(16),
'delay1': numpy.zeros(16),
'geosat': None, # Initally, no geosats defined
'SolPwr':[{},{}],
'delays':[{},{},{},{},{},{},{},{}], # Empty dictionaries (one for each ROACH) for ROACH delays
'sensors':[{},{},{},{},{},{},{},{}]} # Empty dictionaries (one for each ROACH) for ROACH sensor data
#============================
def set_uvw(aa,t,srcname):
'''Set u,v,w coordinates and delays (=-w) [ns] for antenna array aa
at time specified by Time() object t (1 s in advance of current time).
The source name must exist in the source catalog (aa.cat) attached to aa.
'''
global sf_dict, sh_dict
if srcname is None:
if sf_dict['scan_state'] != 1:
init_sched_dict()
return
mjd_ = t.mjd # Get mjd of Time() object (t+1)
aa.date = mjd_ - 15019.5 # Convert mjd to ephem timebase
aa.epoch = '2000/1/1 12:00' # Seems silly to set epoch to J2000 every second, but it keeps getting set to date...
if sf_dict['geosat']:
try:
# Newly calculate coordinates for geosat (an ephem.EarthSatellite object)
# and add it to source catalog. If it already exists, it will be overwritten.
sat = sf_dict['geosat']
sat.compute(aa)
geosat=aipy.amp.RadioFixedBody(sat.ra,sat.dec,name=sat.name)
# This will either add a new source or replace the existing one
aa.cat.add_srcs([geosat,geosat])
except:
# Probably there is no geosat defined. Just continue.
pass
# Fill in scan header dictionary, so that it will be ready at any time for writing
# at the start of a scan. These should be for the current time rather than 1 s in
# the future, since we have not yet "computed" for new time.
src = aa.cat[srcname]
src.compute(aa)
# Save coordinates as TOPOCENTRIC J2000. These should agree with JPL Horizons astrometric
# coordinates for the OVRO site.
sh_dict['ra'] = (src.ra - src.g_ra) + src.a_ra
sh_dict['dec'] = (src.dec - src.g_dec) + src.a_dec
sh_dict['ha'] = eovsa_ha(src)
cat = aa.cat # Make a copy of catalog
cat.compute(aa) # Compute for time t+1
#print t.iso,aa.epoch
sf_dict['uvw'] = sf_dict['uvw1'].copy() # Transfer previously calculated uvw (time t)
sf_dict['delay'] = sf_dict['delay1'].copy() # Transfer previously calculated delay (time t)
sf_dict['timestamp'] = sf_dict['timestamp1'] # Transfer previous timestamp (time t)
timestamp = t.lv # Get LabVIEW timestamp (t+1)
sf_dict['timestamp1'] = timestamp
uvw = sf_dict['uvw1'] # Prepare for next uvw (t+1)
# Loop over antennas to generate uvw of each relative to antenna 1 (index 0)
try:
for i in range(16):
uvw[i] = aa.gen_uvw(0,i,src=cat[srcname]).transpose()[0,0]
sf_dict['phase_tracking'] = True
except PointingError:
# Source is below horizon
uvw = numpy.array([[0.0,0.0,0.0]]*16)
sf_dict['phase_tracking'] = False
# Store result for t+1 stateframe dictionary
sf_dict['uvw1'] = uvw
# Store corresponding delays (= -w coordinate)
sf_dict['delay1'] = -uvw[:,2]
#============================
def mjd(line=None):
# Returns the MJD for the time in a line of the schedule,
# or the current time if no line is supplied.
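# Example: for a (hypothetical) schedule line starting '2018-09-18 13:30:00 SUN',
# only the first 19 characters (the ISO timestamp) are used, giving MJD 58379.5625.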
t = util.Time.now()
if line:
t = util.Time(line[:19])
return t.mjd
#============================
def get_antlist(key='sun',filename='default.antlist'):
# antlist = get_antlist(key='sun',filename='default.antlist')
#
# Load filename into a dictionary of format {antlistname: antlist}
# then return the antlist for the key.
# Supports spaces, commas, etc in antlist. key is not case sensitive,
# but filename is case sensitive.
# If key is not found in filename, returns an empty string.
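# Hypothetical example of default.antlist contents:
# sun ant1-8 ant15
# geosat ant12
# With those contents, get_antlist('SUN') would return the string 'ant1-8 ant15'.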
f = open(filename)
antlist_dict = {}
for line in f:
if len(line.split())<1 or line[0]=='#': # skip blank or commented lines
pass
else:
linekey = line.split()[0].lower() # name of antlist
l = len(linekey)
antlist = line[l:].strip() # antlist
antlist_dict[linekey] = antlist
f.close()
try:
return antlist_dict[key.lower()]
except:
print 'Warning: get_antlist() could not find an antlist of name', key, \
'in file', filename + '. In this case get_antlist() returns an empty string.'
return ''
#============================
class App(QtGui.QMainWindow):
'''Main application
'''
def __init__(self, parent=None):
'''Create the user interface and initialize it
'''
super(App, self).__init__(parent)
menu = self.menuBar()
#self.menu = Menu(self.root)
# Define Menu Actions
newAction = QtGui.QAction('&New', self)
newAction.setShortcut('Ctrl+N')
newAction.setStatusTip('Create new file')
newAction.triggered.connect(self.New)
saveAction = QtGui.QAction('&Save', self)
saveAction.setShortcut('Ctrl+S')
saveAction.setStatusTip('Save current file')
saveAction.triggered.connect(self.Save)
openAction = QtGui.QAction('&Open', self)
openAction.setShortcut('Ctrl+O')
openAction.setStatusTip('Open an existing file')
openAction.triggered.connect(self.Open)
exitAction = QtGui.QAction('&Exit', self)
exitAction.setShortcut('Ctrl+E')
exitAction.setStatusTip('Exit the schedule')
exitAction.triggered.connect(QtGui.qApp.quit)
self.statusBar()
filemenu = menu.addMenu('&File')
filemenu.addAction(newAction)
filemenu.addAction(openAction)
filemenu.addAction(saveAction)
filemenu.addAction(exitAction)
self.subarray_name = subarray_name # Subarray1 for master schedule, sys.argv[1] for 2nd schedule
# Define self.mypid, since this will be used by get_subarray_pid() and wake_up()
self.mypid = mypid
# Set function to handle signal alarm, if it should go off. It is set for 15 s in inc_time().
signal.signal(signal.SIGALRM, self.wake_up)
# Read ACC.ini file to get values for global variables
# binsize, xmlpath, scdport and sfport
try:
self.accini = stateframe.rd_ACCfile()
except urllib2.URLError:
showerror('Error','ACC unreachable, or ACC.ini file does not exist\n'
+'Cannot continue.')
exit()
global sf_dict, sh_dict
if self.subarray_name == 'Subarray1':
self.sh_datfile = '/tmp/scan_header.dat'
else:
self.sh_datfile = '/tmp/scan_header_' + self.subarray_name + '.dat'
if sh_dict == {}:
init_scanheader_dict()
# if self.subarray_name == 'Starburst':
# # write over some defaults (such as for dlacen) with Starburst defaults and add Starburst-specific entries
# sh_dict_starburst = starburst.init_sh_dict()
# sh_dict.update(sh_dict_starburst)
# else:
scan_header(sh_dict,self.sh_datfile)
if sf_dict == {}:
init_sched_dict()
# Generate schedule item XML file on init
junk = gen_schedule_sf(sf_dict)#,mk_xml=True)
if self.subarray_name == 'Subarray1':
# Connect to SQL Server database, or set to None if cannot connect
# Only do this for the master schedule (Subarray1)
connstr = "DRIVER={FreeTDS};SERVER=192.168.24.106,1433; \
DATABASE=eOVSA06;UID=aaa;PWD=I@bsbn2w;"
sh, shver = stateframe.xml_ptrs('/tmp/scan_header.xml')
try:
cnxn = stateframedef.pyodbc.connect(connstr)
cursor = cnxn.cursor()
sfbrange, outlist = stateframedef.sfdef(self.accini['sf'])
shbrange, outlist = stateframedef.sfdef(sh)
except:
showerror('Warning','Cannot connect to SQL server\n')
cnxn = None
cursor = None
sfbrange = None
shbrange = None
self.sql = {'cnxn':cnxn,'cursor':cursor,'sfbrange':sfbrange,'shbrange':shbrange}
else:
self.sql = {'cnxn':None,'cursor':None,'sfbrange':None,'shbrange':None}
# Set window title from command-line argument, which is saved in self.subarray_name
self.setWindowTitle('Schedule for '+self.subarray_name)
self.form_widget = FormWidget(self)
self.setCentralWidget(self.form_widget)
self.show()
#============================
def Open(self, filename=None):
''' To open a new file, delete the contents of lines, and
the text widget. Then proceed to populate them again,
checking any PHASECAL lines to confirm that they will be visible.
'''
if self.Toggle == 0:
# Schedule is running, so do nothing
return
self.status.configure(state = NORMAL)
if filename is None:
init_dir = os.getcwd()
f = askopenfile(initialdir = init_dir, mode = 'r',
filetypes = [('SCD Files','*.scd'),('all files','*.*')])
if f is None:
# User cancelled, so do nothing.
self.status.configure(state = DISABLED)
return
else:
f = open(filename,'r')
try: #takes care of an empty line, if there is one, in
#the file being read.
lines = f.readlines()
except AttributeError:
pass
else:
self.L.delete(0, END)
self.curline = 0
self.lastline = len(lines)
for i,line in enumerate(lines):
self.L.insert(END, line.rstrip('\n'))
ctl_cmd = line.split()[2]
if ctl_cmd == 'PHASECAL' or ctl_cmd == 'PHASECAL_LO' or ctl_cmd == 'STARBURST' or ctl_cmd == 'CALPNTCAL':
name = line.split()[3]
# Get start and stop time for this calibrator, as ephem-compatible strings
if i == self.lastline-1:
# Should never happen, but just in case the PHASECAL
# is the last line of the schedule, assume 15 minutes duration
trange = util.Time([mjd(line),mjd(line) + 15.*60./86400.],format='mjd')
else:
line2 = lines[i+1]
trange = util.Time([mjd(line),mjd(line2)],format='mjd')
if ctl_cmd == 'STARBURST':
check_27m = True
check_2m = False
elif ctl_cmd == 'PHASECAL' or ctl_cmd == 'PHASECAL_LO' or ctl_cmd == 'CALPNTCAL':
check_27m = True
check_2m = True
# Check visibility for source
try:
src=self.aa.cat[name]
visible = scan_visible(src,self.aa,trange,check_27m,check_2m)
if not visible:
self.error = 'Warning, source '+name+' not visible at scheduled time: Schedule line '+str(i+1)
print self.error
except:
self.error = 'Err: source '+name+' not found in catalog. Not added to schedule!'
self.state=['']*len(lines)
self.status.configure(state = DISABLED)
if filename is None:
filenamelist = f.name.split('/')
self.filename = filenamelist[len(filenamelist)-1:][0]
else:
self.filename = filename
# Update the status file in /common/webplots for display on the status web page
self.update_status()
#============================
def New(self):
# New option creates a new table with predetermined content.
if self.Toggle == 0:
# Schedule is running, so do nothing
return
self.status.configure( state = NORMAL)
t = util.Time.now()
scd = make_sched(t=t)
self.L.delete(0, END)
for line in scd:
self.L.insert(END,line)
self.curline = 0
self.lastline = len(scd)
self.status.configure( state = DISABLED)
self.filename = 'solar.scd'
#============================
def Save(self):
''' The Save button will save the file as a text file, in a folder
specified by the user. If the file exists, the program will ask
the user if he wants to replace the file.
'''
if self.Toggle == 0:
# Schedule is running, so do nothing
return
try:
#The Exception is to allow the user to cancel the save.
init_dir = os.getcwd()
fileout = asksaveasfile(initialdir = init_dir,
initialfile = self.filename, mode = 'w',
filetypes = [('SCD Files','*.scd'),
('all files','*.*')])
for i in range(self.lastline):
fileout.write(self.L.get(i)+'\n')
fileout.close()
except AttributeError:
pass
class FormWidget(QtGui.QWidget):
def __init__(self, parent):
super(FormWidget,self).__init__(parent)
self.setGeometry(100,0,500,400)
# H-box for Time widget
self.label = QtGui.QLabel('') #bg="yellow",font="Helvetica 16 bold"
timeframe = QtGui.QHBoxLayout()
timeframe.addWidget(self.label)
self.error = '' # Error string to be added to source on front panel
#pageframe = Frame(self.root)
#pageframe.pack(expand = 1, fill = BOTH)
# H-box for radio buttons
toolbar = QtGui.QHBoxLayout()
self.b1 = QtGui.QRadioButton("D")
self.b1.setChecked(True)
self.b_text = "D"
self.b1.toggled.connect(lambda:self.btnstate(self.b1))
toolbar.addWidget(self.b1)
self.b2 = QtGui.QRadioButton("H")
self.b2.toggled.connect(lambda:self.btnstate(self.b2))
toolbar.addWidget(self.b2)
self.b3 = QtGui.QRadioButton("M")
self.b3.toggled.connect(lambda:self.btnstate(self.b3))
toolbar.addWidget(self.b3)
self.b4 = QtGui.QRadioButton("S")
self.b4.toggled.connect(lambda:self.btnstate(self.b4))
toolbar.addWidget(self.b4)
# Widget for current source and phase tracking state
self.source = QtGui.QLabel(" Source: None "+"Phase Tracking: False")
toolbar.addWidget(self.source)
toolbar.addStretch(1)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(timeframe)
vbox.addLayout(toolbar)
vbox.addStretch(1)
textframe = QtGui.QHBoxLayout()
# Main Listbox widget for Macro commands
self.L = QtGui.QListWidget()
Lframe = Frame(textframe)
Lframe.pack(expand = False, fill=Y,side=LEFT)
self.L = Listbox(Lframe,selectmode=EXTENDED,width=45,height=30)
self.Sx = Scrollbar(Lframe,orient=HORIZONTAL,command=self.xview)
self.L.config( xscrollcommand = self.Sx.set)
self.L.bind('<<ListboxSelect>>',self.display_ctl)
# Associated Listbox widget for status (Waiting..., Running..., or Done)
self.status = Listbox(Lframe,width=8)
self.Sx.pack(side=BOTTOM, fill=X)
self.L.pack(side=LEFT, expand=True, fill = Y)
self.S = Scrollbar(Lframe)
self.S.pack( side = LEFT, fill = BOTH)
self.status.pack(side = LEFT, fill = Y, expand = True)
self.S.config (command = self.yview)
self.L.config( yscrollcommand = self.S.set)
self.status.config( yscrollcommand = self.S.set)
# Atomic Command Listbox
Rframe = Frame(textframe)
Rframe.pack(expand = True, fill=BOTH,side=LEFT)
self.L2 = Listbox(Rframe,selectmode=SINGLE,width=25)
self.L2.pack(side=LEFT, fill = BOTH, expand = True)
self.L.atomlist = self.L2 # Associate main listbox (L) with L2
self.S2 = Scrollbar(Rframe)
self.S2.pack(side = RIGHT, fill = BOTH)
self.S2.config( command = self.L2.yview)
self.L2.config( yscrollcommand = self.S2.set)
self.downbutton = Button(fmain, text = '- 1', command=self.Decrease_cmd)
self.downbutton.pack(side=LEFT)
self.upbutton = Button(fmain, text = '+ 1', command=self.Increase_cmd)
self.upbutton.pack(side=LEFT)
self.Insert = Button(fmain,text="Insert", command=self.Insert)
self.Insert.pack(side = LEFT)
# Toggle controls which state the button is, Go/Stop
self.Toggle = 1
#Stop/Go Button
self.B2 = Button(fmain,text='GO', width = 8, command=self.toggle_state, background='GREEN')
self.B2.pack(side = LEFT)
self.TodayBtn = Button(fmain,text = 'Today', command = self.Today)
self.TodayBtn.pack(side = LEFT)
self.ClearBtn = Button(fmain,text = 'Clear', width = 5, command=self.Clear)
self.ClearBtn.pack(side=LEFT)
# Entry widget, for entering a new line into the schedule
## content = StringVar()
## self.E1 = Entry(textvariable = content)
## self.E1.pack( side = RIGHT)
# self.B1 = Button(text="Create new event", command=self.write)
# self.B1.pack( side = RIGHT)
#self.B3 = Button(text="Raw Command", command=self.send_cmd)
#self.B3.pack( side = RIGHT)
#command = StringVar()
#self.E3 = Entry(textvariable = command)
rawlabel = Label(fmain,text='Raw Command:')
rawlabel.pack(side=LEFT)
E3 = Entry(fmain)
E3.pack( side = LEFT, expand=True, fill=X)
E3.bind('<Return>',self.send_cmd)
E3.bind('<Up>',self.up)
E3.bind('<Down>',self.down)
self.cmd_history = []
self.ptr = 0
t = util.Time.now()
self.label.configure(text=t.iso)
# Set default Ant 14 receiver position to the High-Frequency setting, i.e. lorx = False
self.lorx = False
# Setup Project Tab
fproj = Frame()
fproj.pack(side=LEFT)
self.nb.add(fproj,text='Project')
fline1 = Frame(fproj)
fline1.pack(side=TOP)
tmplabel = Label(fline1,text='Project:')
tmplabel.pack(side=LEFT)
self.wproj = Entry(fline1,width=32)
self.wproj.pack( side = LEFT, expand=False, fill=X)
self.wproj.bind('<Return>',self.handle_entry)
tmplabel = Label(fline1,text='Operator:')
tmplabel.pack(side=LEFT)
self.woper = Entry(fline1,width=16)
self.woper.pack( side = LEFT, expand=False, fill=X)
self.woper.bind('<Return>',self.handle_entry)
self.woper.config(state=DISABLED)
fline2 = Frame(fproj)
fline2.pack(side=TOP)
tmplabel = Label(fline2,text='Comment:')
tmplabel.pack(side=LEFT)
self.wcomm = Entry(fline2,width=50)
self.wcomm.pack( side = LEFT, expand=False, fill=X)
self.wcomm.bind('<Return>',self.handle_entry)
# Listbox widget for calibrator times
CTframe = Frame(fproj)
CTframe.pack(expand = False, fill=Y,side=LEFT)
self.CalTimeLB = Listbox(CTframe,selectmode=EXTENDED,width=45,height=10,font="Courier 10 bold")
out = whenup()
for line in out['lines']:
self.CalTimeLB.insert(END,line)
self.CalTimeLB.pack(side=LEFT)
# Make sure window can never be smaller than initially created size
self.root.update_idletasks()
self.root.minsize(self.root.winfo_width(),self.root.winfo_height())
# Generate the Antenna Array object, used to calculate source coordinates,
# uvw, delays, etc.
sys.stdout.flush()
self.aa = eovsa_cat.eovsa_array_with_cat()
#print t.iso,self.aa.epoch,' Initial epoch'
self.aa.epoch = '2000/1/1 12:00' # Get coordinates in J2000 epoch
sys.stdout.flush()
#self.Open('solar.scd')
self.New() # Generate a solar schedule for today
self.filename = 'solar.scd'
# Establish connect to ROACHes.
self.roaches =[]
self.connect2roach()
# Initialize $WAIT settings
self.waitmode = False # Not in $WAIT mode
self.nextctlline = 0
self.wait = 0
self.solpwr = [{},{}] # Empty solar power dictionary pair
self.sensors = [{},{},{},{},{},{},{},{}] # Empty ROACH sensor dictionaries
self.delays = [{},{},{},{},{},{},{},{}] # Empty ROACH delays dictionaries
self.w = {} # Empty weather dictionary
self.PAthread = None
# Start the clock ticking
self.prev = time.time()
self.root.after(1,self.inc_time)
#============================
#============================
def xview(self, *args):
#Definition for the scrollbar to control two windows and the same time.
self.L.xview(*args)
self.status.xview(*args)
#============================
def yview(self, *args):
#Definition for the scrollbar to control two windows and the same time.
self.L.yview(*args)
self.status.yview(*args)
#============================
def connect2roach(self):
# Connect to all ROACHs used by the array. For Starburst subarray, connect to Starburst ROACHs
# instead of OVSA ROACHs.
if self.subarray_name == 'Subarray1': # MASTER SCHEDULE ONLY: connect to OVSA ROACHs
roachModule = roach
roach_ips = ('roach1.solar.pvt','roach2.solar.pvt','roach3.solar.pvt','roach4.solar.pvt',
'roach5.solar.pvt','roach6.solar.pvt','roach7.solar.pvt','roach8.solar.pvt')
boffile_name = self.accini['boffile']
self.brd_clk_freq = None # Start with no clock defined
#self.brd_clk_freq = 200
# elif self.subarray_name == 'Starburst': # STARBURST ONLY: connect to Starburst ROACHs
# roachModule = starburst.roach
# roach_ips = starburst.roach.get_roach_ips()
# boffile_name = starburst.roach.get_boffile_name()
# self.brd_clk_freq = starburst.roach.get_brd_clk_freq()
else: # OVSA SCHEDULE 2: do not connect to any ROACHs (self.roaches will be an empty list)
roach_ips = ()
if len(self.roaches) != 0:
# Some roaches are already connected, so stop them and reconnect
for r in self.roaches:
if r.fpga: r.fpga.stop()
self.roaches = []
# This will eventually be a loop over all active ROACHes, and must
# tolerate a missing ROACH
for roach_ip in roach_ips:
# Make connection to ROACHes
rnum = int(roach_ip[5:6])-1
#if len(self.roaches) > 0:
#cfg = self.roaches[0].cfg
#for line in cfg:
#if line.find('adc clock') != -1:
#self.brd_clk_freq = int(line.strip().split('=')[1])/4
#break
#else:
#cfg = None
#if self.brd_clk_freq is None:
#self.brd_clk_freq = 300
self.roaches.append(roachModule.Roach(roach_ip, boffile_name))#, cfg))
if self.roaches[-1].msg == 'Success':
print roach_ip,'is reachable'
self.roaches[-1].dlasweep = None
try:
self.roaches[-1].brd_clk = self.roaches[-1].fpga.est_brd_clk()
print roach_ip,'clock is',self.roaches[-1].brd_clk
if self.brd_clk_freq is None:
# Board clock has not been set yet, so set it once (from first ROACH)
self.brd_clk_freq = int(self.roaches[-1].brd_clk)
if abs(self.roaches[-1].brd_clk-self.brd_clk_freq)>1:
print roach_ip,'clock NOT', self.brd_clk_freq, 'MHz.'
except:
print roach_ip,'could NOT read FPGA clock speed. Will mark unreachable'
self.roaches[-1].brd_clk = 0.0
self.roaches[-1].fpga.stop()
self.roaches[-1].fpga = None
sh_dict['roach_brd_clk'][rnum] = self.roaches[-1].brd_clk
else:
print roach_ip,'is unreachable!',self.roaches[-1].msg
self.roaches.pop()
#============================
def wake_up(self):
# This is called whenever the 15-second alarm goes off, indicating the
# process is stuck in sk_wait. We simply send ourselves a SIGINT (ctrl-C),
# which should hopefully do it, but we should also log the fact by setting
# a flag in the self object.
self.error = 'The 15-s-alarm went off!'
print self.error
# Try to reestablish connection to the ROACHes, and set self.fpga accordingly
# This will keep dla2roach() from hanging.
self.connect2roach()
os.kill(self.mypid, signal.SIGINT)
#============================
# Raw command history routines
def send_cmd(self,event):
'''Send a raw command to ACC.'''
global sf_dict, sh_dict
command = event.widget.get()
if command != '':
self.execute_ctlline(command)
self.add2cmdlist(command)
event.widget.delete(0,END)
def add2cmdlist(self,command):
''' Adds a command to a 20-element command history
'''
if self.cmd_history == [] and command != '':
# First non-blank command, so append it
self.cmd_history.append(command)
elif command == '' or self.cmd_history[-1] == command:
# Blank command, or same as previous so skip appending
pass
else:
# Unique non-blank command
if len(self.cmd_history) == 20:
# More than 20 commands in history, so discard first
self.cmd_history.pop(0)
self.cmd_history.append(command)
self.ptr = 0 # Set pointer to bottom of command history
def up(self,event):
# User used up-arrow in Raw Command box
if len(self.cmd_history) <= -self.ptr:
# Already at top of command history
pass
else:
# Decrement pointer and insert command string into widget
self.ptr -= 1
event.widget.delete(0,END)
event.widget.insert(0,self.cmd_history[self.ptr])
def down(self,event):
# User used down-arrow in Raw Command box
if self.ptr == 0:
# Already at bottom of command history
pass
else:
if self.ptr == -1:
# Pointer was previously at last command, so enter blank
event.widget.delete(0,END)
self.ptr = 0
else:
# Increment pointer and insert command string into widget
self.ptr += 1
event.widget.delete(0,END)
event.widget.insert(0,self.cmd_history[self.ptr])
#============================
def handle_entry(self,event):
w = event.widget
command = w.get()
if w == self.wproj:
print 'Project is:',command
elif w == self.woper:
print 'Observer is:',command
elif w == self.wcomm:
print 'Comment is:',command
else:
print 'unknown widget'
#============================
def display_ctl(self,event):
'''Callback for when user clicks in the Macro command window
'''
w = event.widget
if self.Toggle == 0:
# Schedule is running, so clear selection and do nothing
w.selection_clear(0,END)
else:
sel = map(int, w.curselection())
if len(sel) == 1:
line = w.get(sel[0])
cmds = line[20:].split()
f2 = open(cmds[0].rstrip()+'.ctl')
w.atomlist.delete(0,END)
for ctlline in f2.readlines():
w.atomlist.insert(END,ctlline.rstrip('\n'))
f2.close()
#============================
def Clear(self):
#Button to clear the status and the highlight.
self.L.selection_clear(0,END)
self.status.configure( state = NORMAL)
self.status.delete(0,END)
self.state=['']*self.lastline
for x in self.state:
self.status.insert(END,x)
self.L.itemconfig(self.curline,background="white")
self.curline = 0
self.status.configure( state = DISABLED)
#============================
def adjust_selection(self,sel,delt):
if len(sel) == 0:
sel = range(self.lastline)
d = util.datime()
for i in sel:
line = self.L.get(i)
t = util.Time(mjd(line) + delt,format='mjd')
self.L.delete(i)
self.L.insert(i,t.iso[:19] + ' ' + line[20:])
for i in sel:
# Selection is getting unset, so reset it.
self.L.selection_set(i)
#============================
def Decrease_cmd(self):
''' The decrease button will decrease the selected time by one. The options
are Hours, Minutes and Seconds. If no selection is made
it will create an error.
'''
sel = map(int, self.L.curselection()) # list of line indexes selected
if self.var.get() == 0:
one_day = 1.
self.adjust_selection(sel,-one_day)
elif self.var.get() == 1:
one_hour = 1./24
self.adjust_selection(sel,-one_hour)
elif self.var.get() == 2:
one_minute = 1./1440
self.adjust_selection(sel,-one_minute)
elif self.var.get() == 3:
one_second = 1./86400
self.adjust_selection(sel,-one_second)
#============================
def Increase_cmd(self):
''' The increase button will increase the selected time by one. The options
are Hours, Minutes and Seconds. If no selection is made
it will create an error.
'''
sel = map(int, self.L.curselection()) # list of line indexes selected
if self.var.get() == 0:
one_day = 1.
self.adjust_selection(sel,one_day)
elif self.var.get() == 1:
one_hour = 1./24
self.adjust_selection(sel,one_hour)
elif self.var.get() == 2:
one_minute = 1./1440
self.adjust_selection(sel,one_minute)
elif self.var.get() == 3:
one_second = 1./86400
self.adjust_selection(sel,one_second)
#============================
def Today(self,t=None):
''' The button Today will change the schedule to start on the current date.
This will NOT change times. Lines that start a day after the start line
of the schedule will start on today + 1.
'''
if t is None:
t = util.Time.now()
# If this is the standard solar.scd file, do an auto-generate for today
if self.filename == 'solar.scd':
self.New()
else:
# Determine how many days from date of first line to today
line = self.L.get(0)
days = int(t.mjd) - int(mjd(line))
print 'Adding ',days,'days.'
for i in range(self.lastline):
line = self.L.get(i)
linemjd = mjd(line) + days
t2 = util.Time(linemjd,format='mjd')
line = t2.iso[:10] + line[10:]
self.L.delete(i)
self.L.insert(i,line)
self.curline = 0
#============================
def autogen(self,t):
# Auto-generate the standard solar schedule
# Determine sunrise, sunset times for this day
mjdrise, mjdset = suntimes(t)
# First solar line starts at sunrise
trise = util.Time(mjdrise,format='mjd')
line = self.L.get(2)
line = trise.iso[:19] + line[19:]
self.L.delete(2)
self.L.insert(2,line)
# First calibrator line starts 15 min earlier
trise = util.Time(mjdrise - 15.*60./86400.,format='mjd')
line = self.L.get(1)
line = trise.iso[:19] + line[19:]
self.L.delete(1)
self.L.insert(1,line)
# First calibrator ACQUIRE line starts 20 min earlier
trise = util.Time(mjdrise - 20.*60./86400.,format='mjd')
line = self.L.get(0)
line = trise.iso[:19] + line[19:]
self.L.delete(0)
self.L.insert(0,line)
# Last calibrator line starts at sunset
tset = util.Time(mjdset,format='mjd')
line = self.L.get(self.lastline-2)
line = tset.iso[:19] + line[19:]
self.L.delete(self.lastline-2)
self.L.insert(self.lastline-2,line)
# Last (REWIND) line starts 15 min later
tset = util.Time(mjdset + 15.*60./86400.,format='mjd')
line = self.L.get(END)
line = tset.iso[:19] + line[19:]
self.L.delete(END)
self.L.insert(END,line)
# Read calibrator database
cal = readvlacaldb()
# Find sources within 15-35 deg of Sun (also returns antenna array aa, not used)
srclistnarrow, aa = findcal(cal,t=t,dtheta=[15,35])
# Early and late in day, need a wider search window, 15-55 degrees of the Sun
srclistwide, aa = findcal(cal,t=t,dtheta=[15,60])
# Sort by flux density (both narrow and wide)
fluxnarrow = []
for src in srclistnarrow:
fluxnarrow.append(src.mag)
fsortnarrow = sorted(fluxnarrow,reverse=True)
fluxwide = []
for src in srclistwide:
fluxwide.append(src.mag)
fsortwide = sorted(fluxwide,reverse=True)
# Now go through calibrator lines one by one and
# select appropriate source in VLA cal database
for i in range(self.lastline):
line = self.L.get(i)
if line[20:28] == 'PHASECAL':
if i == self.lastline-1:
# Should never happen, but just in case the PHASECAL
# is the last line of the schedule, assume 15 minutes duration
trange = util.Time([mjd(line),mjd(line) + 15.*60./86400.],format='mjd')
else:
line2 = self.L.get(i+1)
trange = util.Time([mjd(line),mjd(line2)],format='mjd')
# Loop over calibrators, highest flux first
for f in fsortnarrow:
# These are 15-min observations, so make sure calibrator
# is visible and will remain visible
idx = fluxnarrow.index(f)
jys = fluxnarrow[idx]
src = srclistnarrow[idx]
visible = scan_visible(src,self.aa,trange,True,True)
if visible:
# Take first visible source, since it will be the one
# with the highest flux in sorted list
break
if visible:
line = line[:29] + src.name + line[37:]
# If this source is not already in the source list, add it
try:
blah = self.aa.cat[src.name]
except KeyError:
# This source is not already in the list, so add it.
# Since src is an ephem.FixedBody, it must be converted
# to an aipy.phs.RadioFixedBody
radiosrc = aipy.amp.RadioFixedBody(src.a_ra,src.a_dec,name=src.name,jys=jys,mfreq=1.0)
radiosrc.compute(aa)
radiosrc.name = src.name
self.aa.cat.add_srcs(radiosrc)
else:
# No source found for narrow window, so try the wide one
for f in fsortwide:
# These are 15-min observations, so make sure calibrator
# is visible and will remain visible
idx = fluxwide.index(f)
jys = fluxwide[idx]
src = srclistwide[idx]
visible = scan_visible(src,self.aa,trange,True,True)
if visible:
# Take first visible source, since it will be the one
# with the highest flux in sorted list
break
if visible:
line = line[:29] + src.name + line[37:]
# If this source is not already in the source list, add it
try:
blah = self.aa.cat[src.name]
except KeyError:
# This source is not already in the list, so add it.
# Since src is an ephem.FixedBody, it must be converted
# to an aipy.phs.RadioFixedBody
radiosrc = aipy.amp.RadioFixedBody(src.a_ra,src.a_dec,name=src.name,jys=jys,mfreq=1.0)
radiosrc.compute(aa)
radiosrc.name = src.name
self.aa.cat.add_srcs(radiosrc)
else:
# No source found for wide, so mark line as a failure
line = line[:29] + 'No Src!!' + line[37:]
print 'No source after searching the following sources: '
for f in fsortwide:
idx = fluxwide.index(f)
jys = fluxwide[idx]
src = srclistwide[idx]
visible = scan_visible(src,self.aa,trange)
print src.name, jys, visible, src.ra, src.dec, ' ', src.az, src.alt
self.L.delete(i)
self.L.insert(i,line)
if i == 1:
# Check if earlier line is ACQUIRE, and if so, make sure it goes to the same
# calibrator!
acline = self.L.get(0)
if acline[20:27] == 'ACQUIRE':
acline = acline[:28] + src.name
self.L.delete(0)
self.L.insert(0,acline)
#============================
def Insert(self):
''' Insert button will take the text written in the entry widget and
insert it at the insertion indicator
'''
self.status.configure( state = NORMAL )
sel = map(int, self.L.curselection())
if len(sel) == 1:
index = sel[0]
self.content = self.E1.get().upper()
if self.content:
line = self.L.get(index)
line = line[:20] + self.content
self.L.insert(index,line)
self.lastline += 1
else:
# If there is no string do not do anything.
pass
self.status.insert(END, '')
self.status.configure( state = DISABLED)
#============================
def toggle_state(self):
now = mjd()
if self.Toggle == 0: #Stop was pressed
self.Toggle = 1
self.B2.configure(text = 'Go')
self.B2.configure(background = 'GREEN')
self.downbutton.configure(state = NORMAL)
self.upbutton.configure(state = NORMAL)
## self.B1.configure(state = NORMAL)
self.Insert.configure(state = NORMAL)
self.ClearBtn.configure(state = NORMAL)
self.TodayBtn.configure(state = NORMAL)
self.L.configure( state = NORMAL)
else: #Go was Pressed
self.Toggle = 0
self.status.configure( state = NORMAL)
self.L.selection_clear(0,END)
self.B2.configure(text = 'STOP')
self.B2.configure(background = 'RED')
self.curline = self.lastline-1 # Will be overridden if another line is determined to be current line
# Go through the status lines to find the expired ones.
for i in range(self.lastline):
## if self.mjd[i] >= now:
line = self.L.get(i)
if mjd(line) >= now:
# Lines in the future
if i == 0:
# If the first line, mark it Waiting
self.status.delete(i)
self.status.insert(i,'Waiting...')
self.curline = i
self.L.itemconfig(i,background="orange")
else:
# If not the first line, mark earlier line for starting.
self.curline = i-1
self.status.delete(self.curline)
self.status.insert(self.curline,'Started...')
self.L.itemconfig(self.curline,background="orange")
break
else:
# Mark lines in the past as Skipped.
self.status.delete(i)
self.status.insert(i,'Skipped')
# Clear remaining lines
for i in range(min(self.curline+1,self.lastline),self.lastline):
self.status.delete(i)
self.status.insert(i,'')
if self.curline == (self.lastline-1):
self.status.delete(self.curline)
self.status.insert(self.curline,'Started...')
# Find the file associated with the Macro command on the current
# line and fill in the L2 Listbox
line = self.L.get(self.curline)
cmds = line[20:].split()
f2 = open(cmds[0].rstrip()+'.ctl')
self.L2.delete(0,END)
lines = f2.readlines()
for ctlline in lines:
# Check for hash mark (#) in line other than first character
# (hash mark in first character means a comment)
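# Hypothetical example: if the macro line gives cmds = ['PHASECAL', '1331+305', 'pcal.fseq'],
# a .ctl line such as 'FSEQ-FILE #2' is expanded here to 'FSEQ-FILE pcal.fseq' (i.e. cmds[2]).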
if '#' in ctlline[1:]:
# We have a substitution to do
ihash = ctlline[1:].find('#')+1
i = int(ctlline[ihash+1:ihash+2])
ctlline = ctlline[:ihash]+cmds[i]+ctlline[ihash+2:]
self.L2.insert(END,ctlline.rstrip('\n'))
f2.close()
self.L.see(min(self.curline+5,END))
self.status.see(min(self.curline+5,END))
self.downbutton.configure(state = DISABLED)
self.upbutton.configure(state = DISABLED)
## self.B1.configure(state = DISABLED)
self.Insert.configure(state = DISABLED)
self.ClearBtn.configure(state = DISABLED)
self.TodayBtn.configure(state = DISABLED)
self.status.configure(state = DISABLED)
#============================
def check_27m_sun(self,sf,data):
pass
def check_27m_sun2(self,sf,data): # Disable for now by renaming (2017-09-05 DG)
# For each 27m, check whether it is too close to Sun (min_dist: 10 degrees);
# if it is, use 'position' command to send to stow position (HA = 0, dec = 29)
#
# If ant is in runmode TRACK or POSITION, then trigger if Requested position
# is within min_dist of Sun OR if Actual position is within min_dist of Sun
# for 3 seconds (it should take < 2 sec to slew across Sun, which is okay)
# - timer restarts if this function sends a position command for the antenna
# If ant is in runmode STOP or VELOCITY, then trigger only if Actual position
# is within min_dist of Sun for 3 seconds (do not trigger based on Requested
# position because Requested position is not accurate in these modes)
#
# If all Controller monitor data from antenna is zero, does not make check
#
# Stow position (0 29) can be 6 degrees from Sun in summer. Code does not trigger
# position command if antenna is already within 0.5 degrees of stow position.
#
# Input params: sf is accini['sf'], data is from get_stateframe (which was just run by inc_time)
global sf_dict
global sun_timer
global trigger_timer
try:
k = sun_timer.keys()
except:
sun_timer = {}
try:
k = trigger_timer.keys()
except:
trigger_timer = {}
min_dist = 10. # minimum_distance from Sun
max_time = 2. # maximum time near Sun (to allow slewing)
safe_pos = [0,29] # safe position to send antenna to - stow position
trigger_cadence = 20 # minimum time between sending command in response to trigger
test_mode = False # prints a bunch of diagnostic info to log if this is True
# define dict to convert runmode ID# from controller to meaning
runmode_map = {0: 'stop', 1: 'position', 2: 'velocity', 4: 'track'}
# get coords of Sun
sun = self.aa.cat['Sun']
# sun = self.aa.cat['AMC-8 (GE-8)'] # for testing purposes, a geosat name can be used here instead of the Sun, to avoid triggering on the real Sun
self.aa.date = util.Time.now().mjd - 15019.5 # set date to present time
sun.compute(self.aa)
sun_coords = (sun.ra,sun.dec)
lst = self.aa.sidereal_time() # current LST
# calc RA,dec of safe pos (to avoid triggering when within 0.5 deg of safe pos)
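# (RA = LST - HA; the stow HA and dec are converted from degrees to radians)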
safe_coords = (lst-safe_pos[0]*pi/180.,safe_pos[1]*pi/180.)
for antnum in [14]:
# augment trigger_timer and skip this antenna if it's less than trigger_cadence since last trigger
try:
trigger_timer[antnum] = trigger_timer[antnum] + 1
except KeyError:
trigger_timer[antnum] = 1000 # start high so that it won't wait 20s before the first trigger
if trigger_timer[antnum] < trigger_cadence:
if test_mode: print 'Skipping ant '+ str(antnum) + ' because <' +str(trigger_cadence)+' sec since last trigger'
continue
# skip this antenna if all crio monitor data for this ant is zero
c = sf['Antenna'][antnum-1]['Controller']
sflist = numpy.array([stateframe.extract(data,c[k]) for k in c.keys()])
if len(find(sflist != 0)) == 0:
if test_mode: print 'Skipping ant '+ str(antnum)+' because no stateframe data from controller'
continue
trigger = False # set this to True if too close to Sun
# get runmode from stateframe data passed to me
rm = stateframe.extract(data,c['RunMode'])
runmode = runmode_map[rm]
if test_mode:
print '-----------'
print 'LST:', lst, '- Sun coords:', sun_coords
print 'Antnum:', antnum
print 'Runmode:', rm, runmode
# determine whether to trigger moving to safe position
# all runmodes: trigger if Actual pos near Sun for > max_time (2 sec)
# check distance of Actual position from Sun
HA_actual = sf_dict['ActualAzimuth'][antnum-1] * pi/180.
dec_actual = sf_dict['ActualElevation'][antnum-1] * pi/180.
actual_coords = (lst-HA_actual,dec_actual)
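# ephem.separation returns the angular separation in radians; scale by 180/pi to compare in degrees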
actual_dist = ephem.separation(actual_coords,sun_coords)*180./pi
if actual_dist > min_dist:
# if Actual position is not too near Sun, reset timer to zero
sun_timer[antnum] = 0.
else:
try:
t = sun_timer[antnum]
except KeyError:
if test_mode: print 'KeyError! setting suntimer to 0'
sun_timer[antnum] = 0.
# if Actual position is too close to Sun, increment timer by one second
sun_timer[antnum] = sun_timer[antnum] + 1.
if sun_timer[antnum] > max_time:
# trigger if Actual position has been close to Sun for more than max_time
# (and not within 0.5 degrees of safe_pos)
safe_actual_dist = ephem.separation(actual_coords,safe_coords)
if test_mode: print 'Dist between stow and actual position:', safe_actual_dist
if safe_actual_dist > 0.5:
trigger = True
if test_mode:
print 'Actual coords:', actual_coords, '- Actual dist:', actual_dist
print 'Sun timer:', sun_timer[antnum]
print 'Trigger based on Sun timer:', trigger
# runmodes POSITION and TRACK: trigger if Requested pos too near Sun
if runmode in ['track','position']:
HA_requested = sf_dict['RequestedAzimuth'][antnum-1] * pi/180.
dec_requested = sf_dict['RequestedElevation'][antnum-1] * pi/180.
requested_coords = (lst-HA_requested,dec_requested)
requested_dist = ephem.separation(requested_coords,sun_coords)*180./pi
if test_mode: print 'Requested coords:', requested_coords, '- Requested dist:', requested_dist
if requested_dist < min_dist:
# trigger if Requested position is too close to Sun and not within 0.5 degrees of stow
safe_requested_dist = ephem.separation(requested_coords,safe_coords)
if test_mode: print 'Dist between stow and requested position:', safe_requested_dist
if safe_requested_dist > 0.5:
trigger = True
if test_mode: print 'Trigger:', trigger
if trigger:
trigger_timer[antnum] = 0. # reset timer so antenna has time to process command and slew off Sun
# position command causes antenna to go to safe_pos, enter 'position'
# runmode, but it will not lose its tracktable
safe_str = str(safe_pos[0]) + ' ' + str(safe_pos[1])
stop_cmd = 'stop ant' + str(antnum)
pos_cmd = 'position ' + safe_str + ' ant' + str(antnum)
self.sendctlline(stop_cmd)
self.sendctlline(pos_cmd)
print 'Antenna ' + str(antnum) + ' actual or requested position too close to Sun, sending to safe position: ' + safe_str
print stop_cmd
print pos_cmd
sys.stdout.flush()
#============================
def inc_time(self):
global sf_dict, sh_dict
self.status.configure(state=NORMAL)
# First set the timer to wake us on the next second
t = util.Time.now()
tdif = int((t.datetime.microsecond)/1000.)
self.timer.singleShot(1000 - tdif, self.inc_time)
# Update the clock
self.form_widget.label.setText(t.iso[:19])
# Set an alarm for 15 seconds. If the process hangs for more than that, we will send ourselves
# a SIGINT via the Callback self.wake_up(), which should recover from sk_wait hang up
signal.alarm(15) # This will reset alarm if it has not gone off yet
tnow = time.time()
self.telapsed = tnow - self.prev
self.prev = tnow
telapsed = int(self.telapsed*1000)
if telapsed > 990 and telapsed < 1010:
pass
else:
            # If the elapsed time is not nominal (outside the 990-1010 ms window), write it to the log file.
print t.iso,str(int(self.telapsed*1000))
            sys.stdout.flush() # Flush stdout (/tmp/schedule.log or /tmp/schedule_[self.subarray_name].log) so the message appears promptly.
# Attempt to read from spawned task pwr_cycle.ant_toggle() queue. Reads up to 10
# items at a time unless queue is empty.
for i in range(10):
try:
msg = pwr_cycle.q.get_nowait()
print t.iso,msg
except:
break
# Attempt to read from spawned task pcapture2.capture_1s() queue.
try:
msg = pcapture2.q.get_nowait()
print t.iso,msg
except:
pass
# If this schedule is running the second subarray, confirm that Subarray1 is running; if it is not,
# print a warning message to the log file.
if self.subarray_name != 'Subarray1':
subarray1_pid = self.get_subarray_pid('Subarray1')
if subarray1_pid == -1:
print util.datime().get('str'), \
'Warning: The master schedule (Subarray1) is not running. This means that antenna diagnostic ' + \
'information will not be updated in the ACC stateframe and no data will be written to the SQL database.'
sys.stdout.flush() # Flush stdout (/tmp/schedule.log or /tmp/schedule_[self.subarray_name].log) once per second so we can see the output.
# Update phase tracking (u,v,w and delays)
srcname = sh_dict['source_id']
try:
# Generate a Time() object at exactly the next upcoming second (time t+1)
t2 = util.Time.now()
tsec = util.Time(t2.mjd + (1 - t2.datetime.microsecond/1000000.)/86400.,format='mjd')
src = self.aa.cat[srcname] # This causes KeyError if source is not found
except KeyError:
# The current scan header source ID is not in the source catalog
srcname = None
# Debug info, simply logs that we have started this procedure
#sys.stdout.write('+')
#sys.stdout.flush() # Flush stdout (/tmp/schedule.log or /tmp/schedule_[self.subarray_name].log) so we can see this '-'.
set_uvw(self.aa,tsec,srcname)
#sys.stdout.write('-')
#sys.stdout.flush() # Flush stdout (/tmp/schedule.log or /tmp/schedule_[self.subarray_name].log) so we can see this '-'.
self.source.setText(text=' Source: '+(str(srcname)+' ')[:12]
+'Phase Tracking: '
+str(bool(sf_dict['phase_tracking'])) + ' '+self.error)
self.error = ''
# Send integer delays to ROACHs - DIFFERENT FOR STARBURST
# if self.subarray_name == 'Starburst':
# starburst.roach.dla2roach(self,sh_dict,sf_dict)
# else:
self.dla2roach()
# Update weather information in sf_dict (reads from OVRO weather station)
self.w = stateframe.weather()
sf_dict.update(self.w)
# Once per minute, update the information from the Solar Power station(s)
if t.datetime.second == 0:
# Updates first solar power station on the minute
self.solpwr[0] = stateframe.rd_solpwr('http://data.magnumenergy.com/MW5127')
if t.datetime.second == 1:
# Updates second solar power station one second later
self.solpwr[1] = stateframe.rd_solpwr('http://data.magnumenergy.com/MW5241')
sf_dict.update({'SolPwr':self.solpwr})
# Read ROACH sensor data, but only one each minute, staggered over different times
# since for all 8 ROACHes this can take more than 0.5 s
for i in range(len(self.roaches)):
if t.datetime.second == 5*i+5:
r = self.roaches[i]
rnum = int(r.roach_ip[5:6])-1
if r.fpga:
r.get_sensor_dict()
if r.msg == 'Success':
self.sensors[rnum].update(r.sensors)
else:
self.sensors[rnum] = {}
else:
self.sensors[rnum] = {}
self.cr_temp = stateframe.control_room_temp()
self.sensors[0]['temp.ambient'] = self.cr_temp
# MASTER SCHEDULE ONLY: Update antenna diagnostics, but only once every 5 minutes (300 s)
if self.subarray_name == 'Subarray1':
if int(t.mjd * 86400.) % 300 == 30:
# Open socket to ACC
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Send commands to update antenna trip information
s.connect((self.accini['host'],self.accini['scdport']))
s.send('UPDATEAZIMUTHDIAGNOSTICS 1')
time.sleep(0.01)
s.send('UPDATEELEVATIONDIAGNOSTICS 1')
time.sleep(0.01)
s.close()
except:
pass
# Read ROACH delay values
for i in range(len(self.roaches)):
r = self.roaches[i]
rnum = int(r.roach_ip[5:6])-1
if r.fpga:
r.get_delays()
if r.msg == 'Success':
delays = dict(zip(['dx0','dy0','dx1','dy1'],r.delays))
else:
delays = dict(zip(['dx0','dy0','dx1','dy1'],[0,0,0,0]))
else:
delays = dict(zip(['dx0','dy0','dx1','dy1'],[0,0,0,0]))
self.delays[rnum].update(delays)
for i in range(8):
sf_dict['sensors'][i].update(self.sensors[i])
sf_dict['delays'][i].update(self.delays[i])
# # STARBURST ONLY: update sf_dict with Starburst-specific monitor data
# if self.subarray_name == 'Starburst':
# sf_dict_starburst = starburst.get_sf_dict(self)
# sf_dict.update(sf_dict_starburst) # make sure that entries in sf_dict_starburst have unique keys so that we don't overwrite sf_dict entries
# Get current stateframe (from ACC) and update sf_dict with Azimuth, Elevation, TrackFlag
# and parallactic angle information from it (all in degrees!)
data, msg = stateframe.get_stateframe(self.accini)
if msg == 'No Error':
version = struct.unpack_from('d',data,8)[0] # Get stateframe version from data
if version > 0.0 and version != self.accini['version']:
# The version number of the stateframe data has changed, so we need to reread
# the ACC ini file (which will read a new stateframe.xml file and give us a new
# sf dictionary.
self.accini = stateframe.rd_ACCfile()
# MASTER SCHEDULE ONLY: If we are connected to the SQL database, we will need to create and send
# a new stateframe definition
if self.subarray_name == 'Subarray1':
result = stateframedef.load_deftable(sdict=self.accini['sf'],version=version)
if not result:
sys.stdout.write('Error loading new stateframe definition')
sys.stdout.flush()
sf = self.accini['sf']
sf_dict.update(stateframe.azel_from_stateframe(sf,data))
# Flag unused antennas as not tracking (no! this is just the ROACH assignments)
# sf_dict['TrackFlag'] = (sf_dict['TrackFlag']) & (sh_dict['antlist'] != 0)
# Check that 27-m antennas are not too close to Sun, and if they are, send them to stow position (using 'position' command)
self.check_27m_sun(sf,data)
else:
self.error = msg
# If we are connected to the SQL database, send converted stateframe (only master schedule is connected)
if msg == 'No Error' and self.sql['cnxn']:
bufout = stateframedef.transmogrify(data, self.sql['sfbrange'])
try:
self.sql['cursor'].execute('insert into fBin (Bin) values (?)',
stateframedef.pyodbc.Binary(bufout))
#sys.stdout.write('*')
#sys.stdout.flush()
self.sql['cnxn'].commit()
except:
# An exception could be an error, or just that the entry was already inserted
self.error = 'Err: Cannot write stateframe to SQL'
# Create schedule part of stateframe from sf_dict
# Subarray1 writes Weather, SolarPower, Roach whereas Subarray2/Starburst don't
if self.subarray_name == 'Subarray1':
fmt, buf, sched_xmlfile = gen_schedule_sf(sf_dict)
# else:
# # SUBARRAY2 (OVSA) AND STARBURST: use starburst module's gen_schedule2_sf to create binary buffer to write to ACC
# # if it is Subarray2 (OVSA), starburst.gen_schedule2_sf writes default values for the Starburst-specific data
# fmt, buf, sched_xmlfile = starburst.gen_schedule2_sf(sf_dict) # using mk_xml=False, so make sure schedule2_stateframe.xml already exists
# Open socket to ACC - PORT DEPENDS ON SUBARRAY
if self.subarray_name == 'Subarray1':
portkey = 'scdsfport'
else:
portkey = 'scd2sfport'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Try to connect and send schedule items of stateframe to ACC
# Uses "schedule" command and encloses data buffer in square brackets
time.sleep(0.01)
s.connect((self.accini['host'],self.accini[portkey]))
s.settimeout(0.5)
s.sendall(buf)
time.sleep(0.02)
s.close()
except socket.timeout:
print util.datime().get('str'),'Socket time-out when writing sched stateframe to ACC'
s.close()
except:
self.error = 'Err: Cannot write sched stateframe to ACC'
if self.Toggle == 0:
# Schedule is in the GO state.
# First check if the current line needs to be started or stopped
line = self.L.get(self.curline)
status = self.status.get(self.curline)
now = mjd()
if (mjd(line) <= now and status == 'Waiting...') or status == 'Started...':
# This line has not been started, so do so now
self.status.delete(self.curline)
self.status.insert(self.curline,'Running...')
self.L.itemconfig(max(self.curline-1,0),background="white")
self.L.itemconfig(self.curline,background="orange")
self.L.see(min(self.curline+5,END))
self.status.see(min(self.curline+5,END))
#******
# Change to spawn this task as non-blocking function
# but make sure it returns, or there is some semaphore
# behavior with error checking
self.execute_cmds()
elif status == 'Running...':
if self.waitmode:
# If $WAIT is currently in force, decrement self.wait
# When self.wait = 0, continue executing commands starting with
# line self.nextctlline, which should be line following $WAIT
self.wait -= 1
print 'Waiting...',self.wait
sys.stdout.flush()
if self.wait == 0:
self.execute_cmds()
nextline = self.L.get(self.curline+1)
if mjd(nextline) <= now:
# Next line should be running
self.status.delete(self.curline)
self.status.insert(self.curline,'Done')
self.curline += 1
self.status.delete(self.curline)
self.status.insert(self.curline,'Running...')
self.L.itemconfig(self.curline-1,background="white")
self.L.itemconfig(self.curline,background="orange")
self.L.see(min(self.curline+5,END))
self.status.see(min(self.curline+5,END))
#******
# Change to spawn this task as non-blocking function
# but make sure it returns, or there is some semaphore
# behavior with error checking
self.execute_cmds()
self.status.configure(state=DISABLED)
# Debug info, simply logs that we have exited this procedure
#sys.stdout.write('-')
#sys.stdout.flush() # Flush stdout (/tmp/schedule.log or /tmp/schedule_[self.subarray_name].log) so we can see this '-'.
#============================
def dla2roach(self):
'''Set integer delays and send to all ROACHes for next second, to be
ready for next 1 PPS. Ant 1 delay is fixed at 9000 steps (delay
depends on step size), and all other antennas are set to
9000 + t_cen[i] + t_geom[i]/step_size, the latter being calculated
from sf_dict['delay1'][i] (nsec). The step_size is 1/f_ADC,
where f_ADC is ADC clock frequency, in GHz.
'''
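        # Numerical sketch (added comment; the 200 MHz board clock and delay values are only examples):
        #   brd_clk_freq = 200. MHz  ->  adc_clk = 200.*4/1000. = 0.8 GHz
        #   dlaoff = int(16500.*0.8/1.2) = 11000 steps   (matches the 800 MHz case noted below)
        #   a delay center of 100 ns with 20 ns of geometric delay then gives
        #   dlax = round((100 - 20)*0.8 + 11000) = 11064 steps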
global sh_dict, sf_dict
# Delay centers. These are read from the SQL database whenever a scan starts.
# The delay center offset (dlaoff) is now scaled to adc_clk, so that it is
# 11,000 for 800 MHz, and 16,500 for 1200 MHz.
# ****** Send delay0, which is actually the delay for the next upcoming second ******
adc_clk = self.brd_clk_freq*4./1000.
dlaoff = int(16500.*adc_clk/1.2)
# This swap is now done at $SCAN-START (and written to ACC)
#dcenidx = numpy.arange(16)
#if self.lorx:
# # If the low-frequency receiver is in place (i.e. an RX-SELECT LO ANT14 command
# # was sent), replace the Ant 14 delay centers with those in the Ant 15 slot.
# dcenidx[13:15] = [14,13]
#dlax = numpy.round((sh_dict['dlacen'][dcenidx] - sf_dict['delay'])*adc_clk + dlaoff)
#dlay = dlax + (sh_dict['dlaceny'] - sh_dict['dlacen'])[dcenidx]
dlax = numpy.round((sh_dict['dlacen'] - sf_dict['delay'])*adc_clk + dlaoff)
dlay = dlax + (sh_dict['dlaceny'] - sh_dict['dlacen'])
for r in self.roaches:
if r.fpga:
a1,a2 = r.ants
# Handle the case of a ROACH that has been set to sweep delays
# The dlasweep dictionary is normally None, but can be {'ant':n,'dla':a,'dlastop':b}
# which will sweep the delay by 1 step per second for delays from a to b relative
# to the current nominal delay.
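                # For example (added comment), the control line '$DLASWEEP 5 -10 10 X' parsed in
                # execute_ctlline() leaves r.dlasweep = {'ant':5,'dla':-10,'dlastop':10,'pol':'X'},
                # sweeping Ant 5's X delay from about -10 to +10 steps relative to nominal, one step per second.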
if r.dlasweep is None:
r.set_delays([dlax[a1-1],dlay[a1-1],dlax[a2-1],dlay[a2-1]])
else:
if r.dlasweep['ant'] == a1:
# Increment delay by one
r.dlasweep['dla'] += 1
dla = r.dlasweep['dla']
if r.dlasweep['dla'] > r.dlasweep['dlastop']:
# The delay is greater than the stop delay, so cancel sweep
# by setting dictionary to None
dla = 0
r.dlasweep = None
else:
if r.dlasweep['pol'] == 'X':
r.set_delays([dlax[a1-1]+dla,dlay[a1-1],dlax[a2-1],dlay[a2-1]])
print 'DLASWEEP Ant',a1,'X delay',dla
elif r.dlasweep['pol'] == 'Y':
r.set_delays([dlax[a1-1],dlay[a1-1]+dla,dlax[a2-1],dlay[a2-1]])
print 'DLASWEEP Ant',a1,'Y delay',dla
else:
r.set_delays([dlax[a1-1]+dla,dlay[a1-1]+dla,dlax[a2-1],dlay[a2-1]])
print 'DLASWEEP Ant',a1,'X and Y delay',dla
elif r.dlasweep['ant'] == a2:
# Increment delay by one
r.dlasweep['dla'] += 1
dla = r.dlasweep['dla']
                        if r.dlasweep['dla'] > r.dlasweep['dlastop']:
                            # The delay is greater than the stop delay, so cancel sweep
                            # by setting dictionary to None
                            dla = 0
                            r.dlasweep = None
                        else:
                            # Sweep still active, so apply the offset (mirrors the ant == a1 branch above)
                            if r.dlasweep['pol'] == 'X':
                                r.set_delays([dlax[a1-1],dlay[a1-1],dlax[a2-1]+dla,dlay[a2-1]])
                                print 'DLASWEEP Ant',a2,'X delay',dla
                            elif r.dlasweep['pol'] == 'Y':
                                r.set_delays([dlax[a1-1],dlay[a1-1],dlax[a2-1],dlay[a2-1]+dla])
                                print 'DLASWEEP Ant',a2,'Y delay',dla
                            else:
                                r.set_delays([dlax[a1-1],dlay[a1-1],dlax[a2-1]+dla,dlay[a2-1]+dla])
                                print 'DLASWEEP Ant',a2,'X and Y delay',dla
elif r.dlasweep['ant'] == 0:
# If ant is 0, sweep delays for both antennas
# (used for total power polarization tests)
# Increment delay by one
r.dlasweep['dla'] += 1
dla = r.dlasweep['dla']
                        if r.dlasweep['dla'] > r.dlasweep['dlastop']:
                            # The delay is greater than the stop delay, so cancel sweep
                            # by setting dictionary to None
                            dla = 0
                            r.dlasweep = None
                        else:
                            # Sweep still active, so apply the offset to both antennas
                            if r.dlasweep['pol'] == 'X':
                                r.set_delays([dlax[a1-1]+dla,dlay[a1-1],dlax[a2-1]+dla,dlay[a2-1]])
                                print 'DLASWEEP Ant',a1,'and',a2,'X delay',dla
                            elif r.dlasweep['pol'] == 'Y':
                                r.set_delays([dlax[a1-1],dlay[a1-1]+dla,dlax[a2-1],dlay[a2-1]+dla])
                                print 'DLASWEEP Ant',a1,'and',a2,'Y delay',dla
                            else:
                                r.set_delays([dlax[a1-1]+dla,dlay[a1-1]+dla,dlax[a2-1]+dla,dlay[a2-1]+dla])
                                print 'DLASWEEP Ant',a1,'and',a2,'X and Y delay',dla
if r.msg != 'Success':
self.error = r.msg+' '+r.roach_ip
#============================
def sequence2roach(self,sequence):
'''Set frequency sequence on the ROACH boards, so that application of band-dependent
coefficients is properly applied. The sequence numbers are 0-based band numbers
'''
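        # Illustrative example (added comment): sequence2roach('1,2,2,3') yields
        # bands = array([0, 1, 1, 2]), i.e. 1-based band numbers become 0-based indices
        # before being sent to each active ROACH via r.set_sequence(bands).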
# Convert from comma-separated variables to zero-based band numbers
bands = numpy.array(sequence.split(',')).astype('int')-1
for r in self.roaches:
if r.fpga:
r.set_sequence(bands)
if r.msg != 'Success':
self.error = r.msg+' '+r.roach_ip
#============================
def sequence2dcmtable(self,sequence):
'''Use frequency sequence to set dcmtable.txt and send to ACC
The sequence numbers are 0-based band numbers
'''
# Convert from comma-separated variables to zero-based band numbers
bands = numpy.array(sequence.split(',')).astype('int')-1
dcm, buf = cal_header.read_cal(2)
dcm_m_attn = stateframe.extract(buf,dcm['Attenuation'])
dcm_attn = dcm_m_attn[bands]
lines = []
g = open('DCM_table.txt','w')
for line in dcm_attn:
l = ' '.join(map(str,line))
lines.append(l)
g.write(l+'\n')
g.close()
cal_header.dcm_table2sql(lines)
# Connect to ACC /parm directory and transfer scan_header files
try:
g = open('DCM_table.txt','r')
acc = FTP('acc.solar.pvt')
acc.login('admin','observer')
acc.cwd('parm')
# Send DCM table lines to ACC
print acc.storlines('STOR dcm.txt',g)
g.close()
print 'Successfully wrote dcm.txt to ACC'
except:
print 'Cannot FTP dcm.txt to ACC'
#============================
def execute_cmds(self):
'''Execute the atomic commands associated with the current line
of the schedule. First read the commands from the associated
file and enter them into the L2 Listbox. Then read them one
at a time from the Listbox and execute them.
'''
global sf_dict, sh_dict
# Update the status file in /common/webplots for display on the status web page
self.update_status()
# Get time range of this Macro command
mjd1 = mjd(self.L.get(self.curline))
mjd2 = mjd(self.L.get(self.curline+1))
# Find the file associated with the Macro command on the current
# line and fill in the L2 Listbox
line = self.L.get(self.curline)
cmds = line[20:].split()
f2 = open(cmds[0].rstrip()+'.ctl')
self.L2.delete(0,END)
# Current options for source ID
if cmds[0].upper() == 'SUN':
sh_dict['project'] = 'NormalObserving'
sh_dict['source_id'] = 'Sun'
sh_dict['track_mode'] = 'PLANET'
elif cmds[0].upper() == 'SOLPNTCAL':
sh_dict['project'] = 'SOLPNTCAL'
sh_dict['source_id'] = 'Sun'
sh_dict['track_mode'] = 'PLANET'
elif cmds[0].upper() == 'PLANET':
sh_dict['project'] = 'PLANET'
sh_dict['source_id'] = cmds[1]
sh_dict['track_mode'] = 'PLANET'
elif cmds[0].upper() == 'FEATTNTEST':
sh_dict['project'] = 'FEATTNTEST'
sh_dict['source_id'] = 'Sun'
sh_dict['track_mode'] = 'PLANET'
elif cmds[0].upper() == 'PHASECAL':
sh_dict['project'] = 'PHASECAL'
sh_dict['source_id'] = cmds[1]
sh_dict['track_mode'] = 'RADEC '
elif cmds[0].upper() == 'PHASECAL_LO':
sh_dict['project'] = 'PHASECAL'
sh_dict['source_id'] = cmds[1]
sh_dict['track_mode'] = 'RADEC '
elif cmds[0][:5].upper() == 'PACAL':
sh_dict['project'] = 'PHASECAL'
sh_dict['source_id'] = cmds[1]
sh_dict['track_mode'] = 'RADEC '
elif cmds[0].upper() == 'CALPNTCAL':
sh_dict['project'] = 'CALPNTCAL'
sh_dict['source_id'] = cmds[1]
sh_dict['track_mode'] = 'RADEC '
elif cmds[0].upper() == 'STARBURST':
sh_dict['project'] = 'STARBURST'
sh_dict['source_id'] = cmds[1]
sh_dict['track_mode'] = 'RADEC '
print 'Source is',cmds[1]
elif cmds[0].upper() == 'GEOSAT' or cmds[0].upper() == 'DELAYCAL':
sh_dict['project'] = 'GEOSAT'
sh_dict['source_id'] = cmds[1].replace('_',' ')
# These are geostationary satellites so far. If/when we add
# moving satellite capability, track_mode for those should be 'SATELL'
sh_dict['track_mode'] = 'FIXED '
try:
f = urllib2.urlopen('http://www.celestrak.com/NORAD/elements/geo.txt',timeout=20)
lines = f.readlines()
except:
print util.Time.now().iso,'Connection to Celestrak timed out.'
sh_dict['source_id']='None'
lines = ['']
            found = False
            for i,line in enumerate(lines):
                if line.find(sh_dict['source_id']) == 0:
                    found = True
                    break
            if found:
# This creates an ephem.EarthSatellite object, which does the
# right thing in calculating coordinates when the time in aa is updated
sat=ephem.readtle(lines[i],lines[i+1],lines[i+2])
sf_dict['geosat']=sat
sat.compute(self.aa)
# Unfortunately, aipy cannot deal with an ephem.EarthSatellite object,
# so this creates a fake RadioFixedBody for the current RA,Dec of the
# satellite, to be added to the source catalog. This has to be updated
# once per second in set_uvw()
geosat=aipy.amp.RadioFixedBody(sat.ra,sat.dec,name=sat.name)
self.aa.cat.add_srcs([geosat,geosat])
else:
print 'Geosat named ',sh_dict['source_id'],'not found!'
sh_dict['source_id']='None'
else:
# Default project is just the first command on line (truncate to 32 chars)
sh_dict['project'] = cmds[0][:32]
print cmds[0][:32]
if len(cmds) == 1:
# Case of only one string on command line
sh_dict['source_id'] = 'None'
else:
# Default source ID is second string on command line (truncate to 12 chars)
sh_dict['source_id'] = cmds[1][:12]
print sh_dict['source_id']
sh_dict['track_mode'] = 'FIXED '
lines = f2.readlines()
for ctlline in lines:
# Check for hash mark (#) in line other than first character
# (hash mark in first character means a comment)
if '#' in ctlline[1:]:
# We have a substitution to do
ihash = ctlline[1:].find('#')+1
i = int(ctlline[ihash+1:ihash+2])
ctlline = ctlline[:ihash]+cmds[i]+ctlline[ihash+2:]
self.L2.insert(END,ctlline.rstrip('\n'))
f2.close()
# Now read the atomic commands one at a time, executing locally
# those starting with $, and sending the others to the ACC.
if self.waitmode:
# If $WAIT is in effect, start with next following line
sline = self.nextctlline
# After executing, turn off waitmode
self.waitmode = False
# self.nextctlline = 0
else:
sline = 0
for i in range(sline,len(lines)):
ctlline = self.L2.get(i)
self.execute_ctlline(ctlline,mjd1,mjd2)
if ctlline.split()[0].upper() == '$WAIT':
self.nextctlline = i+1
break
def sendctlline(self,ctlline):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.accini['host'],self.accini['scdport']))
s.send(ctlline)
time.sleep(0.01)
s.close()
except:
pass
def interpret_pcycle(self, ctlline):
''' Interprets the control line containing a $PCYCLE command, of the
        form $PCYCLE <device> <antlist>, where <device> is one of 'ant',
'fem', 'frontend', 'crio', and <antlist> is the usual antenna list.
This is all NOT case-sensitive. Note that only the first three
characters of the device name are examined. If the device is 'ant',
then the device name is optional, i.e.
$PCYCLE ant1 ant2-4 ant7
will work.
The device and antenna list are returned. If line cannot be
interpreted, device is None
'''
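        # Illustrative examples (added comment):
        #   self.interpret_pcycle('$PCYCLE ant1 ant2-4')    -> ('ANT', [1, 2, 3, 4])
        #   self.interpret_pcycle('$PCYCLE frontend ant5')  -> ('FRONTEND', [5])
        #   self.interpret_pcycle('$PCYCLE bogus ant5')     -> (None, None)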
valid = {'ANT':'ANT','FEM':'FRONTEND','FRO':'FRONTEND','CRI':'CRIO','OTH':'OTHER'}
tokens = ctlline.upper().split()
# First token is just the command. Check whether the second token is valid:
if len(tokens) == 1:
return None, None
try:
# Check that first three characters of second token matches valid
# devices, and set device to full spelling.
device = valid[tokens[1][:3]]
except KeyError:
return None, None
if device != 'ANT': tokens = tokens[1:]
# Now check remaining tokens for antenna number
ants = []
for token in tokens[1:]:
try:
# Try to interpret ANTn where n is an integer
junk, num = token.split('ANT')
if num != '': ants.append(int(num))
except:
try:
# Try to interpret ANTm-n, where m and n are integers
a1, a2 = num.split('-')
for i in range(int(a1),int(a2)+1):
ants.append(i)
except:
return None, None
return device, ants
#============================
def execute_ctlline(self,ctlline,mjd1=None,mjd2=None):
# Send line to ACC. Lines that start with '$' will be entered
# into stateframe without execution by ACC. Lines that start
# with '$*' are Starburst commands.
# Skip comments
if ctlline[0] == '#':
pass
else:
if ctlline.strip().upper() == 'DCMAUTO-ON':
self.sendctlline('DCMTABLE DCM.TXT')
self.sendctlline(ctlline)
# try:
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.connect((self.accini['host'],self.accini['scdport']))
# s.send(ctlline)
# time.sleep(0.01)
# s.close()
# except:
# pass
if ctlline[0] == '$':
# This line starts with '$', so execute locally
# if ctlline[1] == '*':
# # This is a Starburst-specific command, pass it to starburst module to handle
# # Pass it sh_dict and sf_dict in case it needs to modify values - dicts are mutable so should
# # be able to do this
# starburst.execute_ctlline(self,ctlline,sh_dict,sf_dict,mjd1,mjd2)
#==== MK_TABLES ====
if ctlline.split()[0].upper() == '$MK_TABLES':
cmd, fname, src = ctlline.split()
if mjd1 is None:
d = util.datime()
mjd1 = d.get()
mjd2 = mjd1+1
if fname.upper() == 'GEOSAT_TAB':
tbl = make_geosattable(sf_dict['geosat'],self.aa,mjd1,mjd2)
else:
tbl = make_tracktable(src,self.aa,mjd1,mjd2)
# Write out to file with .radec extension
fname = fname+'.radec'
f = open('/tmp/'+fname,'w')
f.write(tbl)
f.close()
time.sleep(0.01)
# Send tracktable file to acc
f = open('/tmp/'+fname,'r')
acc = FTP(self.accini['host'])
acc.login('admin','observer')
acc.cwd('parm')
acc.storlines('STOR '+fname,f)
acc.close()
f.close()
#userpass = 'admin:observer@'
f = urllib2.urlopen('ftp://'+userpass+'acc.solar.pvt/parm/'+fname)
tbl_echo = ''
for line in f.readlines():
tbl_echo += line.rstrip()+'\n'
if tbl != tbl_echo:
print 'Error: Transfer of track table',fname,'failed!'
print tbl,'not equal\n',tbl_echo
#==== FEM-INIT ====
elif ctlline.split()[0].upper() == '$FEM-INIT':
ant_str = 'ant1-13'
t = threading.Thread(target=adc_cal2.set_fem_attn, kwargs={'ant_str':ant_str})
t.daemon = True
t.start()
#==== CAPTURE-1S ====
elif ctlline.split()[0].upper() == '$CAPTURE-1S':
# Use $CAPTURE-1S <stem> where <stem> is a string to add to the end of the
# capture filename. The capture is done on the dpp. This will take a few
# seconds to complete.
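                    # For instance (added comment), '$CAPTURE-1S mytest' spawns
                    # pcapture2.capture_1s(stem='mytest') in a daemon thread.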
try:
cmd, stem = ctlline.strip().split()
except:
# Must be no stem given, so use ''
stem = ''
# Capture 1 s of data on dpp
t = threading.Thread(target=pcapture2.capture_1s, kwargs={'stem':stem})
t.daemon = True
t.start()
#==== SCAN-START ====
elif ctlline.split()[0].upper() == '$SCAN-START':
# Command by itself is normal scan start, while $SCAN-START NODATA
# means set up scan, but do not take data. Used for some calibrations.
nodata = ctlline.strip().split()[-1].upper()
# Do any tasks here that are required to start a new scan
sys.stdout.write('Started new scan\n')
# Check for Ant 14 low-frequency receiver status
self.lorx = False # Default (normal) position is high frequency receiver
# Check Ant 14 Receiver Position Status
data, msg = stateframe.get_stateframe(self.accini)
FEMA = self.accini['sf']['FEMA']
if stateframe.extract(data,FEMA['Timestamp']) != 0:
# This is a valid record, so proceed
if stateframe.extract(data,FEMA['PowerStrip']['RFSwitchStatus']) == 0:
# The switch position is right for LoRX
RX_pos = stateframe.extract(data,FEMA['FRMServo']['RxSelect']['Position']) + stateframe.extract(data,FEMA['FRMServo']['RxSelect']['PositionError'])
if RX_pos < 150.:
# Consistent with LoRX being in position, or heading there, so set as True
self.lorx = True
print 'Ant 14 delays will be set for LO-Frequency Receiver'
else:
                                print 'Ant 14 outlet set for LO-Frequency Receiver, but RxSelect position is wrong.'
print 'Ant 14 delays will be set for HI-Frequency Receiver.'
else:
print 'Ant 14 delays will be set for HI-Frequency Receiver.'
else:
print 'LO-Frequency Receiver check failed due to bad (0) stateframe.'
# Fetch current delay centers from SQL database, and write them to
# the ACC file /parm/delay_centers.txt, which is used by the dppxmp program
                    try:
                        xml, buf = cal_header.read_cal(4)
dcenters = stateframe.extract(buf,xml['Delaycen_ns'])
if self.lorx:
# If the LO-frequency receiver is active, put delays in slot for Ant 15 into Ant 14
dcenters[13] = dcenters[14]
timestr = Time(int(stateframe.extract(buf, xml['Timestamp'])), format='lv').iso
f = open('/tmp/delay_centers.txt', 'w')
f.write('# Antenna delay centers, in nsec, relative to Ant 1\n')
f.write('# Date: ' + timestr + '\n')
f.write('# Note: For historical reasons, dppxmp needs four header lines\n')
f.write('# Ant X Delay[ns] Y Delay[ns]\n')
fmt = '{:4d} {:10.3f} {:10.3f}\n'
for i in range(16):
f.write(fmt.format(i + 1, *dcenters[i]))
f.close()
time.sleep(0.1) # Make sure file has time to be closed.
f = open('/tmp/delay_centers.txt', 'r')
acc = FTP('acc.solar.pvt')
acc.login('admin', 'observer')
acc.cwd('parm')
# Send DCM table lines to ACC
print acc.storlines('STOR delay_centers.txt', f)
f.close()
print 'Successfully wrote delay_centers.txt to ACC'
sh_dict['dlacen'] = dcenters[:,0]
sh_dict['dlaceny'] = dcenters[:,1]
except:
                        print util.Time.now().iso,'SQL connection for delay centers failed. Delay center not updated'
if self.subarray_name == 'Subarray1':
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Send commands to update antenna trip information
s.connect((self.accini['host'],self.accini['scdport']))
s.send('UPDATEAZIMUTHDIAGNOSTICS 1')
time.sleep(0.01)
s.send('UPDATEELEVATIONDIAGNOSTICS 1')
time.sleep(0.01)
s.close()
except:
pass
# We need an initial call to set_uvw() in order to set RA, Dec and HA
# coordinates in scan header dictionary.
srcname = sh_dict['source_id']
try:
# Generate a Time() object at exactly the next upcoming second (time t+1)
t2 = util.Time.now()
tsec = util.Time(t2.mjd + (1 - t2.datetime.microsecond/1000000.)/86400.,format='mjd')
src = self.aa.cat[srcname] # This causes KeyError if source is not found
except KeyError:
# The current scan header source ID is not in the source catalog
srcname = None
sh_dict['timestamp'] = tsec.lv
if srcname is not None:
set_uvw(self.aa,tsec,srcname)
print 'Current RA, Dec, HA:',sh_dict['ra'],sh_dict['dec'],sh_dict['ha']
sys.stdout.flush()
# Read KATADC status registers. This can take a long time...
sys.stdout.write('There are '+str(len(self.roaches))+' active ROACHes\n')
sys.stdout.flush()
for r in self.roaches:
rnum = int(r.roach_ip[5:6]) - 1
sys.stdout.write('Reading KATADC for '+r.roach_ip+'...')
sys.stdout.flush()
if r.fpga:
r.get_katadc_dict()
if r.msg == 'Success':
sh_dict['katadc'][rnum].update(r.katadc)
sys.stdout.write(r.msg+'\n')
sys.stdout.flush()
else:
# In case of failure, set to empty dictionary
sh_dict['katadc'][rnum] = {}
sys.stdout.write('Failed:'+r.msg+'\n')
sys.stdout.flush()
# This fails, for some reason--probably just takes too long
#sys.stdout.write('Reading clock...')
#sys.stdout.flush()
#r.brd_clk = r.fpga.est_brd_clk()
#if r.msg == 'Success':
# sh_dict['roach_brd_clk'][rnum].update(r.brd_clk)
# sys.stdout.write(r.msg+'\n')
# sys.stdout.flush()
#else:
# # In case of failure, set to empty dictionary
# sh_dict['katadc'][rnum] = {}
# sys.stdout.write('Failed:'+r.msg+'\n')
# sh_dict['roach_brd_clk'][rnum] = 0
else:
# In case of no communication, set to empty dictionary
sh_dict['katadc'][rnum] = {}
sh_dict['roach_brd_clk'][rnum] = 0
sys.stdout.write('FPGA communication failed\n')
sys.stdout.flush()
# Read acc0time.txt file from ACC and update scan header
try:
f = urllib2.urlopen('ftp://'+userpass+'acc.solar.pvt/parm/acc0time.txt',timeout=1)
mjdacc0 = np.double(f.readline().split()[0])
f.close()
except:
                        print util.Time.now().iso,'ACC connection for acc0 time timed out. Reading from /tmp/acc0time.txt'
f = open('/tmp/acc0time.txt','r')
mjdacc0 = np.double(f.readline().split()[0])
f.close()
sh_dict['time_at_acc0'] = Time(mjdacc0,format='mjd')
# if self.subarray_name == 'Starburst':
# # get a dictionary with any Starburst-specific scan header data and add it to sh_dict
# sh_dict_starburst = starburst.get_sh_dict(self,ctlline)
# sh_dict.update(sh_dict_starburst)
# # make a copy of scan_header file including Starburst-specific data and copy it to Starburst server
# starburst.write_scan_header(sh_dict,self.sh_datfile)
# else: # write OVSA scan header and store on ACC
scan_header(sh_dict,self.sh_datfile)
# If we are connected to the SQL database, send converted scan header
if self.sql['cnxn']:
f = open(self.sh_datfile)
data = f.read()
f.close()
bufout = stateframedef.transmogrify(data, self.sql['shbrange'])
try:
self.sql['cursor'].execute('insert into hBin (Bin) values (?)',
stateframedef.pyodbc.Binary(bufout))
self.sql['cnxn'].commit()
sys.stdout.write('Scan Header Record successfully written to SQL Server\n')
sys.stdout.flush()
except:
# An exception could be an error, or just that the entry was already inserted
sys.stdout.write('Writing Scan Header record to SQL Server FAILED\n')
sys.stdout.flush()
self.error = 'Err: Cannot write scan header to SQL'
if nodata == 'NODATA':
pass
else:
# Set scan state to on
sf_dict['scan_state'] = 1
#==== SCAN-RESTART ====
elif ctlline.split()[0].upper() == '$SCAN-RESTART':
# This command is for restarting a scan with the same setup as the
# previously running scan, where only the scan state must be turned on
# Set scan state to on
sf_dict['scan_state'] = 1
#==== SCAN-STOP ====
elif ctlline.split()[0].upper() == '$SCAN-STOP':
sf_dict['scan_state'] = -1
#==== PA-SWEEP ====
elif ctlline.split()[0].upper() == '$PA-SWEEP':
# Rotate 27-m focus rotation mechanism to sweep through a given angle
# range (does nothing if PA adjustment routine is already running)
# Usage: $PA-SWEEP PA rate, where angle is swept from -PA to PA at
# rate of 1-degree-per-rate [s]
print 'Got '+ctlline.split()[0].upper()+' command.'
if self.PAthread is None or not self.PAthread.is_alive():
# Thread is not already running, so it is safe to proceed
try:
PA,rate = map(numpy.int,ctlline.strip().split()[-2:])
except:
# Reading arguments failed, so use defaults
PA = 80
rate = 3
print 'PA and rate are ',PA,rate
try:
# Spawn the stateframe.PA_sweep() routine to update PA once/rate
self.PAthread = threading.Thread(target=stateframe.PA_sweep,kwargs={'PA':PA,'rate':rate})
self.PAthread.daemon = True
self.PAthread.start()
print 'PAthread started.'
except:
# Something went wrong
print 'Error spawning PA_sweep task'
pass
#==== PA-TRACK ====
elif ctlline.split()[0].upper() == '$PA-TRACK':
# Track 27-m focus rotation mechanism to correct for parallactic angle
# of given antenna (does nothing if antenna not correctly specified)
# Usage: $PA-TRACK ant4 <CROSSED>
print 'Got '+ctlline.split()[0].upper()+' command.'
if self.PAthread is None or not self.PAthread.is_alive():
# Thread is not already running, so it is safe to proceed
antstr = ctlline.strip().split()[1].upper()
crossed = False
if len(ctlline.strip().split()) == 3:
if ctlline.strip().split()[2].upper() == 'CROSSED':
crossed = True
print 'Given antenna is '+antstr
try:
# Spawn the stateframe.PA_adjust() routine to update PA once/minute
antn = pcapture2.ant_str2list(antstr)[0]
print 'Antenna index is',antn
self.PAthread = threading.Thread(target=stateframe.PA_adjust,kwargs={'ant':antn,'crossed':crossed})
self.PAthread.daemon = True
self.PAthread.start()
print 'PAthread started.'
except:
# Antenna not correctly specified, so do not spawn routine
print 'Antenna specification no good?'
pass
#==== PA-STOP ====
elif ctlline.split()[0].upper() == '$PA-STOP':
# Send Abort string to stateframe.PA_adjust() routine. Note that
# abort may not be acted upon until up to 1 s later.
if self.PAthread and self.PAthread.is_alive():
stateframe.q.put_nowait('Abort')
#==== TRIPS ====
elif ctlline.split()[0].upper() == '$TRIPS':
try:
# Send commands to update antenna trip information
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.accini['host'],self.accini['scdport']))
s.send('UPDATEAZIMUTHDIAGNOSTICS 1')
time.sleep(0.01)
s.send('UPDATEELEVATIONDIAGNOSTICS 1')
time.sleep(0.01)
s.close()
except:
pass
#==== DLASWEEP ====
elif ctlline.split()[0].upper() == '$DLASWEEP':
try:
vals = ctlline.split()
print vals
if len(vals) == 4:
junk,ant,dla,dlastop = vals
pol = None
elif len(vals) == 5:
junk,ant,dla,dlastop,pol = vals
for r in self.roaches:
a1,a2 = r.ants
if int(ant) == a1 or int(ant) == a2 or int(ant) == 0:
r.dlasweep = {'ant':int(ant),'dla':int(dla),'dlastop':int(dlastop),'pol':pol}
except:
print 'Could not interpret $DLASWEEP command'
#==== WAIT ====
elif ctlline.split()[0].upper() == '$WAIT':
# Need to wait for given number of seconds, so set self.waitmode to True,
# set self.nextctlline to point to next following line, and record duration
try:
dur = int(ctlline.split()[1])
except:
print 'Could not interpret duration on $WAIT command--defaulting to 10 s'
dur = 10
self.waitmode = True
#self.nextctlline = i+1
self.wait = dur
print 'Initializing wait for',dur,'seconds'
#break
#==== PCYCLE ====
elif ctlline.split()[0].upper() == '$PCYCLE':
# Cycle the power of some device (antenna controller, fem, or crio)
# for a given antenna.
device, ants = self.interpret_pcycle(ctlline)
if device is None:
print 'Error interpreting $PCYCLE command',ctlline
else:
# Since device is not None, interpreting tokens succeeded.
if device == 'ANT':
# Turn off all antennas that are to be power cycled
antstr = ''
for antnum in ants:
antstr += ' ant'+str(antnum)
self.sendctlline('powerswitch 0 '+antstr)
# Spawn tasks to perform power cycle (each takes awhile)
for antnum in ants:
t = threading.Thread(target=pwr_cycle.ant_toggle, args=(antnum, device))
t.daemon = True
t.start()
#==== KATADC_GET ====
elif ctlline.split()[0].upper() == '$KATADC_GET':
# Get the standard deviation for each KatADC (assumes frequency tuning is static)
# Note, takes about 0.2 s for each active ROACH
for r in self.roaches:
r.get_attn()
rnum = int(r.roach_ip[5:6])
if r.msg == 'Success':
sdev = dict(zip(['sdev.adc0.h','sdev.adc0.v','sdev.adc1.h','sdev.adc1.v'],r.sdev))
sh_dict['katadc'][rnum].update(sdev)
else:
# In case of failure, set to empty dictionary
sh_dict['katadc'][rnum] = {}
#==== REWIND ====
elif ctlline.split()[0].upper() == '$REWIND':
# Get date of first line of current schedule
# and increment by 1 day, then autogenerate a
# new schedule
self.toggle_state() # Turn off schedule
# Get time of first line in schedule and add a day
mjd1 = mjd(self.L.get(0))
t = util.Time(mjd1+1,format='mjd')
self.Today(t)
self.Clear()
self.toggle_state() # Turn schedule back on
#==== LNA_INIT ====
elif ctlline.split()[0].upper() == '$LNA-INIT':
# Get LNA_settings.txt file from ACC and send the series of
# commands needed to set the LNA voltages
#userpass = 'admin:observer@'
lnafile = urllib2.urlopen('ftp://'+userpass+'acc.solar.pvt/parm/LNA_settings.txt',timeout=1)
lines = lnafile.readlines()
lnafile.close()
lnas = {0:'hh',1:'lh',2:'lv',3:'hv'}
lnas_a = [{},{},{},{}]
lnas_b = [{},{},{},{}]
try:
for i,line in enumerate(lines):
if line.find('[FEMA]') == 0:
# Found FEMA section, so take lines i+2 through i+5 as data lines
for k in range(4):
lna,fstr,polstr,model,sn,vdrain,vg1,vg2,idrain =lines[i+2+k].split()
lnas_a[int(lna)] = {'vd':float(vdrain),'vg1':float(vg1),'vg2':float(vg2)}
if line.find('[FEMB]') == 0:
# Found FEMB section, so take lines i+2 through i+5 as data lines
for k in range(4):
lna,fstr,polstr,model,sn,vdrain,vg1,vg2,idrain =lines[i+2+k].split()
lnas_b[int(lna)] = {'vd':float(vdrain),'vg1':float(vg1),'vg2':float(vg2)}
except:
print 'Error reading/parsing LNA_settings.txt file from ACC'
try:
for i in range(4):
cmdstr = 'LNA-ENABLE '+lnas[i]+' on ANT14'
self.sendctlline(cmdstr)
cmdstr = 'LNA-DRAIN '+lnas[i]+' '+str(lnas_a[i]['vd'])+' ANT14'
self.sendctlline(cmdstr)
cmdstr = 'LNA-GATE1 '+lnas[i]+' '+str(lnas_a[i]['vg1'])+' ANT14'
self.sendctlline(cmdstr)
cmdstr = 'LNA-GATE2 '+lnas[i]+' '+str(lnas_a[i]['vg2'])+' ANT14'
self.sendctlline(cmdstr)
cmdstr = 'LNA-ENABLE '+lnas[i]+' on ANT15'
self.sendctlline(cmdstr)
cmdstr = 'LNA-DRAIN '+lnas[i]+' '+str(lnas_b[i]['vd'])+' ANT15'
self.sendctlline(cmdstr)
cmdstr = 'LNA-GATE1 '+lnas[i]+' '+str(lnas_b[i]['vg1'])+' ANT15'
self.sendctlline(cmdstr)
cmdstr = 'LNA-GATE2 '+lnas[i]+' '+str(lnas_b[i]['vg2'])+' ANT15'
self.sendctlline(cmdstr)
except:
print 'Error sending LNA_settings to ACC'
#==== SUBARRAY ====
elif ctlline.split()[0].upper() == '$SUBARRAY':
# run the SUBARRRAY1 command if this is the master schedule,
# otherwise run the SUBARRAY2 command
print '$SUBARRAY line is:',ctlline
if ctlline.find('.antlist') == -1:
# there is no .antlist file in this line --> the antlist
# should be directly specified in the line, e.g.:
# $SUBARRAY ant1 ant7-8,ant10
l = len('$SUBARRAY ')
antlist = ctlline[l:]
else:
# a .antlist file is specified --> read antlist from
# the specified .antlist file
antlistfile = ctlline.split()[1]
try:
antlistname = ctlline.split()[2]
antlist = get_antlist(antlistname,antlistfile)
except:
antlist = ''
if antlist == '':
self.error = '$SUBARRAY: antlist name not in ' + antlistfile
return
if self.subarray_name == 'Subarray1':
N = 1
else:
N = 2
cmd = 'SUBARRAY' + str(N) + ' ' + antlist
try: # send appropriate command to ACC
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.accini['host'],self.accini['scdport']))
s.send(cmd)
time.sleep(0.01)
s.close()
print 'ctl cmd \'' + ctlline + '\' sent to ACC as \'' + cmd + '\''
sys.stdout.flush()
except:
                        print 'ctl cmd \'' + cmd + '\' not successfully sent to ACC'
sys.stdout.flush()
pass
else:
cmds = ctlline.split()
if cmds[0].upper() == 'FSEQ-FILE':
# This is an FSEQ-FILE command, so find and set frequency sequence
# Just FTP sequence file from ACC
fseqfile = urllib2.urlopen('ftp://'+userpass+'acc.solar.pvt/parm/'+cmds[1])
nrpt = None # Initially not defined
fsequence = '' # Initially empty
for line in fseqfile.readlines():
# Find DWELL line (contains 35 dwell times, in s, one for each
# defined band) and split into 35 "repeat" numbers
keywd = 'LIST:DWELL'
if line.find(keywd) == 0:
dwellseq = line[len(keywd):].split(',')
if len(dwellseq) != 35:
print 'FSEQ file',cmds[1],'DWELL line must have 35 entries.'
break
# Find nearest-integer number of 0.02 s periods
nrpt = (numpy.array(dwellseq).astype('float')/0.02 + 0.5).astype('int')
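                            # Worked example (added comment): a dwell of 0.1 s gives
                            # int(0.1/0.02 + 0.5) = 5, so that band is repeated 5 times
                            # in the 50-element (50 x 0.02 s = 1 s) frequency sequence.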
keywd = 'LIST:SEQUENCE'
if line.find(keywd) == 0:
if nrpt is None:
print 'FSEQ file',cmds[1],'DWELL line must come before SEQUENCE line.'
break
bands = numpy.array(line[len(keywd):].split(',')).astype('int')
# Step through bands in fsequence and repeat them according to
# nrpt in order to form a 50-element sequence
for band in bands:
for i in range(nrpt[band]):
fsequence += str(band)+','
break
if fsequence == '':
print 'FSEQ file',cmds[1],'not successfully interpreted.'
# Default to allowing all channels in RFI mask
sh_dict.update({'chanmask':numpy.array([1]*204800,'byte')})
else:
sh_dict.update({'fsequence':fsequence[:-1]}) # -1 removes trailing ','
chanmask = cu.get_chanmask(fsequence[:-1])
sh_dict.update({'chanmask': chanmask})
# Get nominal Chan2Wide assignment, then multiply by chanmask and update it
fseqlist = fsequence[:-1].rsplit(',')
item = []
for band in fseqlist:
ch = cu.chan_asmt(int(band))
item += ch
sh_dict.update({'chan2wide':numpy.array(item)*sh_dict['chanmask']})
self.sequence2roach(fsequence[:-1])
self.sequence2dcmtable(fsequence[:-1])
def update_status(self):
# Read the current schedule from the list window and write to output file
fileout = open('/common/webplots/status.txt','w')
for i in range(self.lastline):
line = self.L.get(i)
if i == self.curline:
line = '* '+line
else:
line = ' '+line
fileout.write(line+'\n')
fileout.close()
app = App()
mainloop()
|
gpl-2.0
|
lin-credible/scikit-learn
|
sklearn/covariance/__init__.py
|
389
|
1157
|
"""
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
|
bsd-3-clause
|
Lawrence-Liu/scikit-learn
|
examples/text/document_classification_20newsgroups.py
|
222
|
10500
|
"""
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
|
bsd-3-clause
|
RJC0514/micromeritics
|
documentation/bjh_ui.py
|
1
|
5464
|
"""
User interface for the BJH Jupyter notebook
Includes toggles for bjh correction, thickness curve, and isotherm range
"""
# Calculations
import numpy as np
import scipy.interpolate as intrp
from micromeritics import isotherm_examples as ex, thickness, bjh, util
# Graphing
import matplotlib.pyplot as plt
import matplotlib.ticker
# Display
from IPython.display import display, clear_output
from ipywidgets import HBox, Layout, HTML
import ipywidgets as widgets
s = ex.silica_alumina()
prel = s.Prel
qads = s.Qads
APF = 9.53000
DCF = 0.0015468
adsorption = True
def bjh_plot(x=None):
"""
This was created assuming the matplotlib inline backend is used (%matplotlib inline)
If %matplotlib notebook is used it may be more efficient to create a graph
once and use the ipywidgets to update/change the data
    Takes an argument because wgt.observe calls bjh_plot(wgt.value), but we need the
    values of all the widgets to do the calculation, so we use global variables instead.
"""
# Remove old graph but wait until new graph is made so the page won't change position
clear_output(wait=True)
fig = plt.figure(figsize=(15.5, 11))
# Adjust space between VolPlot and AreaPlot
fig.subplots_adjust(hspace=0.25)
# Create VolPlot
vol_plot = fig.add_subplot(2, 1, 1)
vol_plot.set_title('Cumulative Volume')
vol_plot.grid()
# Set x-axis (pore radius)
vol_plot.set_xlabel('Pore Radius [$\AA$]')
vol_plot.set_xscale('log')
vol_plot.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
vol_plot.set_xticks([5, 10, 50, 100, 500, 1000])
# Set y-axis 1 (cumulative pore volume)
vol_plot.set_ylabel('Pore Volume [cm3/g]')
# Create AreaPlot
area_plot = fig.add_subplot(2, 1, 2)
area_plot.set_title('Cumulative Area')
area_plot.grid()
# Set x-axis (pore radius)
area_plot.set_xlabel('Pore Radius [$\AA$]')
area_plot.set_xscale('log')
area_plot.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
area_plot.set_xticks([5, 10, 50, 100, 500, 1000])
# Set y-axis 1 (cumulative pore area)
area_plot.set_ylabel('Pore Area [m2/g]')
# Get currently selected widget values
selected_cor = cor_wgt.value
selected_thk = thk_wgt.value
selected_iso0 = iso_wgt.value[0]
selected_iso1 = iso_wgt.value[1]
# Restrict Prel if needed
if selected_iso0 == 0.0 and selected_iso1 == 1.0:
r_prel, r_qads = prel, qads
else:
r_prel, r_qads = util.restrict_isotherm(prel, qads, selected_iso0, selected_iso1)
# Run BJH Calculation
bjh_data = bjh_dict[selected_cor](r_prel, r_qads, APF, DCF, thk_dict[selected_thk], adsorption)
r, v, a = bjh_data.PoreRadii, bjh_data.cumV, bjh_data.cumA
# Plot Volume
vol_plot.plot(r, v, 'or-', alpha=0.7)
vol_plot.autoscale_view(True, True, True)
# Set Volume y-axis 2 (derivative)
d_vol_plot = vol_plot.twinx()
d_vol_plot.set_ylabel('Derivative [cm3/g]')
# Calculate Volume Derivative (this might differ from MicroActive)
smooth_v = intrp.splrep(np.log(r[::-1]), v[::-1], s=0.0002)
dV = intrp.splev(np.log(r[::-1]), smooth_v, der=1)
dV = -dV[::-1]*np.log(10)
# Plot Volume Derivative
d_vol_plot.plot(r, dV, 'oc-', alpha=0.7)
d_vol_plot.autoscale_view(True, True, True)
# Plot Area
area_plot.plot(r, a, 'or-', alpha=0.7)
area_plot.autoscale_view(True, True, True)
# Set Area y-axis 2 (derivative)
d_area_plot = area_plot.twinx()
d_area_plot.set_ylabel('Derivative [m2/g]')
# Calculate Area Derivative (this might differ from MicroActive)
smooth_a = intrp.splrep(np.log(r[::-1]), a[::-1], s=0.0002)
dA = intrp.splev(np.log(r[::-1]), smooth_a, der=1)
dA = -dA[::-1]*np.log(10)
# Plot Area Derivative
d_area_plot.plot(r, dA, 'oc-', alpha=0.7)
d_area_plot.autoscale_view(True, True, True)
# Widget dictionary
bjh_dict = {
1: bjh.standard,
2: bjh.kjs,
3: bjh.faas
}
thk_dict = {
1: thickness.KrukJaroniecSayari(),
2: thickness.Halsey(),
3: thickness.HarkinsJura(),
4: thickness.BroekhoffDeBoer(),
5: thickness.CarbonBlackSTSA()
}
def bjh_display():
global cor_wgt
global thk_wgt
global iso_wgt
# Widgets
cor_wgt = widgets.ToggleButtons(
options={'Standard': 1, 'Kruk-Jaroniec-Sayari': 2, 'Faas': 3},
value=1
)
cor_wgt.observe(bjh_plot, names='value')
thk_wgt = widgets.ToggleButtons(
options={'Kruk-Jaroniec-Sayari': 1, 'Halsey': 2, 'Harkins and Jura': 3,
'Broekhoff-de Boer': 4, 'Carbon Black STSA': 5},
value=3
)
thk_wgt.observe(bjh_plot, names='value')
iso_wgt = widgets.FloatRangeSlider(
value=[0.0, 1.0],
min=0.0, max=1.0, step=0.01,
continuous_update=False,
layout=Layout(width='45%')
)
iso_wgt.observe(bjh_plot, names='value')
tab_wgts = [cor_wgt, thk_wgt, HBox([HTML(value='Relative Pressure'), iso_wgt])]
tab_names = {0: 'BJH Correction', 1: 'Thickness Curve', 2: 'Isotherm'}
tab = widgets.Tab(children=tab_wgts, _titles=tab_names)
display(tab)
# Make graph using pre defined widget values
bjh_plot()
def print_vol(bjh_data, name='PythonVolumeData'):
"""Prints volume data that can be pasted into MicroActive's volume graph"""
print(name + ' (A, cm3/g, cumulative)')
for i in range(0, len(bjh_data.PoreRadii)):
print(bjh_data.PoreRadii[i], bjh_data.cumV[i])
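# Hedged usage sketch (assumes this module is imported in a Jupyter notebook with
# %matplotlib inline and ipywidgets enabled; only names defined above are used,
# and the helper name `example_usage` is illustrative, not part of the package).
def example_usage():
    # Build the tabbed widget UI and draw the initial graph
    bjh_display()
    # Run one BJH calculation directly and print data for MicroActive
    data = bjh.standard(prel, qads, APF, DCF, thickness.HarkinsJura(), adsorption)
    print_vol(data, name='ExampleRun')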
|
gpl-3.0
|
YuepengGuo/zipline
|
zipline/data/ffc/frame.py
|
5
|
6463
|
"""
FFC Loader accepting a DataFrame as input.
"""
from numpy import (
ix_,
zeros,
)
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Int64Index,
)
from zipline.lib.adjusted_array import adjusted_array
from zipline.lib.adjustment import (
Float64Add,
Float64Multiply,
Float64Overwrite,
)
from zipline.data.ffc.base import FFCLoader
ADD, MULTIPLY, OVERWRITE = range(3)
ADJUSTMENT_CONSTRUCTORS = {
ADD: Float64Add.from_assets_and_dates,
MULTIPLY: Float64Multiply.from_assets_and_dates,
OVERWRITE: Float64Overwrite.from_assets_and_dates,
}
ADJUSTMENT_COLUMNS = Index([
'sid',
'value',
'kind',
'start_date',
'end_date',
'apply_date',
])
class DataFrameFFCLoader(FFCLoader):
"""
An FFCLoader that reads its input from DataFrames.
Mostly useful for testing, but can also be used for real work if your data
fits in memory.
Parameters
----------
column : zipline.data.dataset.BoundColumn
The column whose data is loadable by this loader.
baseline : pandas.DataFrame
A DataFrame with index of type DatetimeIndex and columns of type
Int64Index. Dates should be labelled with the first date on which a
value would be **available** to an algorithm. This means that OHLCV
data should generally be shifted back by a trading day before being
supplied to this class.
adjustments : pandas.DataFrame, default=None
A DataFrame with the following columns:
sid : int
value : any
kind : int (zipline.data.ffc.frame.ADJUSTMENT_TYPES)
start_date : datetime64 (can be NaT)
end_date : datetime64 (must be set)
apply_date : datetime64 (must be set)
The default of None is interpreted as "no adjustments to the baseline".
"""
def __init__(self, column, baseline, adjustments=None):
self.column = column
self.baseline = baseline.values
self.dates = baseline.index
self.assets = baseline.columns
if adjustments is None:
adjustments = DataFrame(
index=DatetimeIndex([]),
columns=ADJUSTMENT_COLUMNS,
)
else:
# Ensure that columns are in the correct order.
adjustments = adjustments.reindex_axis(ADJUSTMENT_COLUMNS, axis=1)
adjustments.sort(['apply_date', 'sid'], inplace=True)
self.adjustments = adjustments
self.adjustment_apply_dates = DatetimeIndex(adjustments.apply_date)
self.adjustment_end_dates = DatetimeIndex(adjustments.end_date)
self.adjustment_sids = Int64Index(adjustments.sid)
def format_adjustments(self, dates, assets):
"""
Build a dict of Adjustment objects in the format expected by
adjusted_array.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
}
"""
min_date, max_date = dates[[0, -1]]
# TODO: Consider porting this to Cython.
if len(self.adjustments) == 0:
return {}
# Mask for adjustments whose apply_dates are in the requested window of
# dates.
date_bounds = self.adjustment_apply_dates.slice_indexer(
min_date,
max_date,
)
dates_filter = zeros(len(self.adjustments), dtype='bool')
dates_filter[date_bounds] = True
# Ignore adjustments whose apply_date is in range, but whose end_date
# is out of range.
dates_filter &= (self.adjustment_end_dates >= min_date)
# Mask for adjustments whose sids are in the requested assets.
sids_filter = self.adjustment_sids.isin(assets.values)
adjustments_to_use = self.adjustments.loc[
dates_filter & sids_filter
].set_index('apply_date')
# For each apply_date on which we have an adjustment, compute
# the integer index of that adjustment's apply_date in `dates`.
# Then build a list of Adjustment objects for that apply_date.
# This logic relies on the sorting applied on the previous line.
out = {}
previous_apply_date = object()
for row in adjustments_to_use.itertuples():
# This expansion depends on the ordering of the DataFrame columns,
# defined above.
apply_date, sid, value, kind, start_date, end_date = row
if apply_date != previous_apply_date:
# Get the next apply date if no exact match.
row_loc = dates.get_loc(apply_date, method='bfill')
current_date_adjustments = out[row_loc] = []
previous_apply_date = apply_date
# Look up the approprate Adjustment constructor based on the value
# of `kind`.
current_date_adjustments.append(
ADJUSTMENT_CONSTRUCTORS[kind](
dates,
assets,
start_date,
end_date,
sid,
value,
),
)
return out
def load_adjusted_array(self, columns, dates, assets, mask):
"""
Load data from our stored baseline.
"""
if len(columns) != 1:
raise ValueError(
"Can't load multiple columns with DataFrameLoader"
)
elif columns[0] != self.column:
raise ValueError("Can't load unknown column %s" % columns[0])
date_indexer = self.dates.get_indexer(dates)
assets_indexer = self.assets.get_indexer(assets)
# Boolean arrays with True on matched entries
good_dates = (date_indexer != -1)
good_assets = (assets_indexer != -1)
return [adjusted_array(
# Pull out requested columns/rows from our baseline data.
data=self.baseline[ix_(date_indexer, assets_indexer)],
            # Mask out requested columns/rows that didn't match.
mask=(good_assets & good_dates[:, None]) & mask,
adjustments=self.format_adjustments(dates, assets),
)]
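# Hedged usage sketch (illustrative only): `example_column` stands in for a
# zipline BoundColumn; the sids, dates and values below are made-up placeholders
# and `_example_loader` is not part of zipline's API.
def _example_loader(example_column):
    dates = DatetimeIndex(['2014-01-02', '2014-01-03', '2014-01-06', '2014-01-07'])
    sids = Int64Index([1, 2])
    baseline = DataFrame(1.0, index=dates, columns=sids)
    adjustments = DataFrame({
        'sid': [1],
        'value': [0.5],
        'kind': [MULTIPLY],
        'start_date': [dates[0]],
        'end_date': [dates[1]],
        'apply_date': [dates[2]],
    })
    return DataFrameFFCLoader(example_column, baseline, adjustments=adjustments)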
|
apache-2.0
|
DonBeo/scikit-learn
|
sklearn/tests/test_pipeline.py
|
6
|
12643
|
"""
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params()
params2 = pipe2.get_params()
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
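# Hedged sketch (not part of the original test suite): a minimal end-to-end
# pipeline built only from estimators already imported above; the helper name
# `_example_pipeline_usage` is illustrative.
def _example_pipeline_usage():
    iris = load_iris()
    pipe = make_pipeline(StandardScaler(), SelectKBest(f_classif, k=2), LogisticRegression())
    pipe.fit(iris.data, iris.target)
    return pipe.score(iris.data, iris.target)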
|
bsd-3-clause
|
MartinThoma/algorithms
|
sorting/timing.py
|
1
|
2814
|
# Core Library modules
import operator
import random
import sys
import timeit
import uuid
from typing import List
# Third party modules
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def main():
size = 10_000
functions = [
(generate_uuid4s, "UUIDv4", size),
(generate_int_strings, "small ints", size),
(generate_int_strings_big, "big ints", size),
(generate_int_strings_36, "36-char ints", size),
(generate_int_strings_numpy, "small ints (numpy)", size),
# (generate_int_strings_numpy_36, "36-char ints (numpy)", 100),
]
functions = functions[::-1]
duration_list = {}
for func, name, size in functions:
durations = timeit.repeat(lambda: func(size), repeat=500, number=1)
duration_list[name] = durations
print(
f"{name:<20}: "
f"min: {min(durations):0.3f}s, "
f"mean: {np.mean(durations):0.3f}s, "
f"max: {max(durations):0.3f}s"
)
create_boxplot(duration_list)
def generate_uuid4s(size: int) -> List[str]:
"""Each string has 36 characters"""
return [str(uuid.uuid4()) for _ in range(size)]
def generate_int_strings(size: int) -> List[str]:
"""Each string has between 0 and 6 characters"""
high = 2 ** 18
return [str(random.randint(0, high)) for _ in range(size)]
def generate_int_strings_big(size: int) -> List[str]:
"""Each string has between 13 and 31 characters"""
low = 2 ** 40
high = 2 ** 100
return [str(random.randint(low, high)) for _ in range(size)]
def generate_int_strings_36(size: int) -> List[str]:
"""Each string has 36 characters"""
low = 10 ** 35
high = 10 ** 36 - 1
return [str(random.randint(low, high)) for _ in range(size)]
def generate_int_strings_numpy(size: int) -> List[str]:
low = 0
high = 2 ** 18
return [str(el) for el in np.random.randint(low, high=high, size=size, dtype="int")]
# def generate_int_strings_numpy_36(size: int) -> List[str]:
# """Each string has 36 characters"""
# low = 10 ** 35
# high = 10 ** 36 - 1
# return [str(el) for el in np.random.randint(low, high=high, size=size, dtype="int")]
def create_boxplot(duration_list):
plt.figure(num=None, figsize=(8, 4), dpi=300, facecolor="w", edgecolor="k")
sns.set(style="whitegrid")
sorted_keys, sorted_vals = zip(
*sorted(duration_list.items(), key=operator.itemgetter(1))
)
flierprops = dict(markerfacecolor="0.75", markersize=1, linestyle="none")
ax = sns.boxplot(data=sorted_vals, width=0.3, orient="h", flierprops=flierprops,)
ax.set(xlabel="Time in s", ylabel="")
plt.yticks(plt.yticks()[0], sorted_keys)
plt.tight_layout()
plt.savefig("output.png")
if __name__ == "__main__":
main()
|
mit
|
Zomega/thesis
|
Wurm/PyWurm/LegDemo.py
|
1
|
2840
|
from math import *
import numpy
import random
import matplotlib.pyplot as plt
from pylab import arange
from random import randint
import numpy as np
import pylab as pl
from matplotlib import collections as mc
from ControlledSystem import ClosedLoopSystem
from Controller import *
from Leg import *
def plot(traj, name1, name2, tau):
T, x1 = traj.getSequence(name1)
T, x2 = traj.getSequence(name2) #[ a for a,b in traj ], [ b for a,b in traj ])
plt.plot(x1, x2, linewidth=2, label='Tau = ' + str(tau))
def pullin( tau, I_dot_max):
def f(t):
t_start = 0
if t < t_start:
return 0
if t < t_start + tau:
return -I_dot_max
if t < t_start + 2 * tau:
return I_dot_max
return 0
return f
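# Hedged illustration (arbitrary values): pullin(tau, I_dot_max) returns a
# piecewise-constant rate, -I_dot_max on [0, tau), +I_dot_max on [tau, 2*tau),
# and 0 afterwards. E.g. with f = pullin(1.0, 5): f(0.5) == -5, f(1.5) == 5, f(3.0) == 0.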
phi = 2 * pi / 3.0
I_min = 1
I_max = 10
I_dot_max = 5
leg = Leg(I_min, I_max, I_dot_max, 1, 1, 1, phi, 1)
def trim_traj( traj ):
new_traj = Trajectory()
for time, entry in traj.timeline:
if entry["th_dot"] < 0:
return new_traj
new_traj.register(time, entry)
return new_traj
"""
TAU = []
RETURN = []
tau_l = [ x / 40.0 for x in range(4*19) ]
for tau in tau_l:
leg_w_control = ClosedLoopSystem( leg, TVController(pullin(tau, I_dot_max) ) )
th_0 = -0.7
x_0 = {'I': I_max, 'th_dot': 0, 'th': th_0}
traj = leg_w_control.simulate( x_0, 6 )
traj = trim_traj(traj)
TAU.append(tau)
RETURN.append( -1 * traj.timeline[-1][1]["th"] / th_0 )
print tau, RETURN[-1]
plot( traj, "th", "th_dot", tau )
# ADD General Pendulum info.
plt.plot([0], [0], 'ro')
plt.plot( [th_0,-th_0], [0,0], 'ro')
TH_DOT, TH = np.mgrid[0:0.65:100j, (-phi / 1.9):(phi / 1.9):100j]
TH_DDOT = - ( leg.b * TH_DOT + leg.A * np.sin(TH) + leg.B * np.sin(TH / 2.0 ) ) / I_max
speed = np.sqrt(TH_DOT*TH_DOT + TH_DDOT*TH_DDOT)
lw = 5*speed/speed.max()
plt.vlines([-phi / 2, phi/2],0,0.65)
plt.hlines([0], -phi / 2, phi/2)
plt.streamplot(TH, TH_DOT, TH_DOT, TH_DDOT, color='0.5', linewidth=lw)
plt.legend()
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.plot(TAU, RETURN)
plt.ylabel("-th_f / th_0")
plt.xlabel("tau")
plt.hlines([1], 0, 2)
plt.show()"""
print "STARTING DRAWING..."
import pygame
clock = pygame.time.Clock()
pygame.init()
size = (width, height) = (600,500)
screen = pygame.display.set_mode(size)
j = 0
t = 0
leg_w_control = ClosedLoopSystem( leg, TVController(pullin(1.274, I_dot_max) ) )
th_0 = - 0.5
x_0 = {'I': I_max, 'th_dot': 0, 'th': th_0}
traj = leg_w_control.simulate( x_0, 6 )
traj = trim_traj(traj)
while 1:
screen.fill((255,255,255))
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
drawGait(screen, leg, traj, t)
clock.tick(60)
pygame.display.flip()
t += 1/10.0
pygame.image.save(screen, "frame" + str(j) +".png")
j = j+1
|
mit
|
rahulsrma26/code-gems
|
RL/randomWalk/monteCarlo.py
|
1
|
3926
|
import warnings
import matplotlib.pyplot as plt
import numpy as np
class RandomWalk2D:
def __init__(self, grid_size=3, end_states=[(0,0)], rewards=[1], exploration=.1, move_cost=0):
self.n = grid_size
self.end_states = end_states
self.move_cost = move_cost
self.rewards = rewards
self.e = exploration
self.n_actions = 4
self.actions = [(-1,0), (1,0), (0,-1), (0,1)]
        # invalid move penalty for first time
self.q = np.ones((self.n, self.n, self.n_actions)) * -99999
self.c = np.zeros((self.n, self.n, self.n_actions), dtype=int)
self.policy = np.zeros((self.n,self.n), dtype=int)
for state, value in np.ndenumerate(self.policy):
self.policy[state] = np.random.choice(self.valid_moves(state))
def valid_moves(self, state):
moves = []
if state[0] != 0:
moves.append(0)
if state[0] != self.n - 1:
moves.append(1)
if state[1] != 0:
moves.append(2)
if state[1] != self.n - 1:
moves.append(3)
return moves
def move(self, state):
action = self.policy[state]
if np.random.uniform() < self.e:
action = np.random.choice(self.valid_moves(state))
return tuple([x+y for x,y in zip(state, self.actions[action])]), action
def episode(self):
state = tuple(np.random.random_integers(0, self.n - 1, size=2))
states_actions = set()
while state not in self.end_states:
next_state, action = self.move(state)
states_actions.add((state, action))
state = next_state
return states_actions, self.rewards[self.end_states.index(state)] - self.move_cost*len(states_actions)
def update(self):
states_actions, reward = self.episode()
for s, a in states_actions:
s_a = (s[0], s[1], a)
self.c[s_a] += 1
self.q[s_a] += (reward - self.q[s_a]) / self.c[s_a]
for state,_ in states_actions:
self.policy[state] = np.argmax(self.q[state[0], state[1],:])
def plot(self, axis):
d = 1/(2*self.n)
for y in range(self.n):
for x in range(self.n):
cx, cy = x + .5, y + .5
if (y,x) in self.end_states:
v = self.rewards[self.end_states.index((y,x))]
c = 'coral' if v < 0 else 'lime'
axis.add_artist(plt.Circle((cx, cy), .3, color=c))
axis.text(cx, cy, str(v), fontsize=15, horizontalalignment='center', verticalalignment='center')
else:
dx, dy = 0, 0
m = self.policy[(y,x)]
if m == 0:
dx, dy = 0, -.3
elif m == 1:
dx, dy = 0, .3
elif m == 2:
dx, dy = -.3, 0
elif m == 3:
dx, dy = .3, 0
plt.arrow(cx, cy, dx, dy, head_width=.2, head_length=.2, fc="k", ec="k")
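# Hedged sketch (illustrative, separate from main() below; `quick_demo` is not part
# of the original script): train a policy on a small grid using only the class above.
# Action indices follow self.actions: 0=up, 1=down, 2=left, 3=right.
def quick_demo(episodes=5000):
    rw = RandomWalk2D(grid_size=4, end_states=[(0, 0)], rewards=[1])
    for _ in range(episodes):
        rw.update()
    return rw.policy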
def main():
exp = RandomWalk2D(grid_size=5, exploration=.15, move_cost=.05, \
end_states=[(0,0), (4,4), (1,2), (2,1)], rewards=[2,1,-1,-1])
display_interval = 1000
figure, axis = plt.subplots()
figure.canvas.set_window_title('Monte-Carlo')
axis.set_xlim([0,exp.n])
axis.xaxis.tick_top()
axis.set_ylim([exp.n, 0])
for iter in range(1000):
for sub_iter in range(display_interval):
exp.update()
axis.cla()
exp.plot(axis)
plt.title('Policy Iteration: {0}'.format((iter+1)*display_interval), y=1.08)
axis.set_aspect('equal')
plt.draw()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plt.pause(.001)
plt.show()
if __name__ == '__main__':
main()
|
mit
|
SylvainGuieu/smartplotlib
|
histogram.py
|
1
|
12043
|
from __future__ import division, absolute_import, print_function
from .recursive import KWS, alias
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from .plotclasses import (DataPlot, dataplot, XYPlot, xyplot,
ImgPlot, XYZPlot, xyzplot
)
import numpy as np
import matplotlib.cbook as cbook
import six
from scipy.stats import binned_statistic  # needed by _statbin below
histogram = xyplot.derive()
def _statbin(x, values, fstat="mean", bins=10,
             range=None, binerror=False,
             centered=True, sigma=1.0):
    """ return the binned_statistic from scipy.stats
    return bins, stats, error, count
    stats can be a list of statistics, for instance if fstat="std"
    it is 2 stats [mean+std*sigma, mean-std*sigma]
    default sigma is one.
    """
    # NOTE: this helper assumes the lookup tables `handled`, `fstat_lookup` and
    # `fstat_err_lookup` (mapping statistic names to callables / error estimators)
    # are defined elsewhere in the package; they are not defined in this file.
    ## if already handled by binned_statistic or by this func do nothing
    # else look in lookup
    if isinstance(fstat, six.string_types) and (fstat not in handled):
        try:
            fstat = fstat_lookup[fstat]
        except KeyError:
            raise ValueError("'%s' is not a valid function name should be one of %s"%(fstat,
                             "'"+"', '".join(set(list(handled)+list(fstat_lookup.keys())))+"'"
                             ))
    # if count, binerror does not make sense
    if (fstat == "count") and binerror:
        binerror = False
    stat, bins, _ = binned_statistic(x, values, fstat, bins, range)
    # bin occupancy, also returned to the caller
    count, _, _ = binned_statistic(x, values, "count", bins, range)
    if fstat in fstat_err_lookup:
        # compute errors if we can
        stat_err, _, _ = binned_statistic(x, values, fstat_err_lookup[fstat], bins, range)
        stat_err *= sigma
        if binerror:
            # for a binned error, divide by the square root of the bin occupancy
            stat_err = stat_err/np.sqrt(count)
    else:
        stat_err = None
    return bins, stat, stat_err, count
@histogram.initier
def histogram(plot, *args, **kwargs):
""" Plot Factory that compute histogram and maxes a XYPlot
The parameters are the same than the numpy.histogram function.
All other parameters are plots parameters
Computation Parameters
----------------------
data : array like
        the one-dimensional array of data to build the histogram of
bins : array, tuple, int, optional
        Array of bins or tuple of (min, max, N) or an integer (default=10)
If an integer, the min(data), max(data) are taken.
range : 2xtuple, optional
min, max boundary for the data. None means no boundary.
e.g: (None,4.5) will take 4.5 as maximum and no minimum constrains
weights : array like, optional
data weights, must be of the same size than data
density : bool, optional
If False (default), the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Specific Plot Parameters
------------------------
bottom : float
the bottom ground value of histogram
align : string
'mid' or 'center', 'right', 'left'
        position of the bars relative to the bin centers.
        default is 'mid'
orientation: string
'vertical' or 'horizontal' orientation of histogram bars.
rwidth : float
        The relative width of the plotted bars with respect to the physical
        bin width. Default is 1.0
roffset : float
The relative offset of plotted bars.
Use rwidth, roffset to make some space for other histograms
last : A previous xyplot created with histogram, used to stack histogram
together.
Returns
-------
    xyplot : a XYPlot plot collection instance. The following parameters are altered:
Built Parameters
----------------
x : array like
The computed bins data
y : array like
The computed histogram
"""
plot.update(kwargs.pop(KWS,{}), **kwargs)
(data, bins, bin_range, weights, density) = plot.parseargs(args,
"data", "bins", "range", "weights", "density",
bins=10, range=None, weights=None, density=False)
binsgiven = (hasattr(bins, "__iter__") or bin_range is not None)
if not binsgiven:
bin_range = (np.nanmin(data), np.nanmax(data))
m, bins = np.histogram(data, bins, weights=weights, range=bin_range,
density=density
)
if density:
## TODO: compute errorbars for density == True
err = None
else:
err = np.sqrt(m)
m = m.astype(float) # causes problems later if it's an int
plot["data"] = data # set the data, in case it was an alias for instance
_makebinedstatplot(plot, m, bins, err)
plot.goifgo()
def _makebinedstatplot(plot, m, bins, err):
(cumulative, bottom, align,
orientation, rwidth, log,
stacked, rsep, roffset, amplitude, count,
last
) = plot.parseargs([],
"cumulative", "bottom", "align",
"orientation", "rwidth", "log",
"stacked", "rsep", "roffset", "amplitude","count",
"last",
bins=10, range=None, weights=None,
cumulative=False, bottom=None, align='mid',
orientation='vertical', rwidth=1.0, log=False,
stacked=False, rsep=None, roffset=0.0,
amplitude=1,
count=0, last=None
)
if rsep is None:
rsep = rwidth
if stacked:
if last is not None:
if isinstance(last, XYPlot):
try:
last = last["last"]
except KeyError:
raise ValueError("The xyplot given in last parameters does not seems to be from a histogram plot factory")
if np.asarray(last).shape != m.shape:
raise ValueError("Previous histogram given in last parameter does not have the same size of current histogram. Did the bins changed ?")
bottom = last
m += last
lasty = plot.get("lasty", None)
#di index dd data can be "x", "y" or "y", "x"
di, dd = plot._get_direction()
totwidth = np.diff(bins)
if rwidth is not None:
dr = min(1.0, max(0.0, rwidth))
else:
dr = 1.0
if not stacked:
boffset = (rsep*count+roffset)*totwidth
else:
boffset = roffset*totwidth
width = dr*totwidth
realx = (bins[:-1]+bins[1:])/2.
if align in ['mid', "center"]:
xbins = realx
elif align == 'right':
xbins = (realx+(totwidth*(1.-dr))/2.)
else:
xbins = (realx-(totwidth*(1.-dr))/2.)
if bottom is None:
bottom = np.zeros(len(m), np.float)
if stacked:
height = m - bottom
else:
height = m
#xbin = (bins[:-1]+bins[1:])/2.
oxbins = xbins+boffset
hist_plot = height*amplitude
plot.update({di:oxbins, dd:hist_plot,
dd+"min":0, dd+"max":alias("y"),
dd+"err":err
},
hist=m, bins=bins,
last=alias("y"),
lasty=alias("y"),
count=count+1,
)
plot.step.update(where="mid")
plot.bar.update(align="center",
edge=oxbins,
height=hist_plot, width=width,
base=bottom, rwidth=1.0,
yerr=None, xerr=None
)
plot.fillstep.update(x=realx)
plot.fill_between.update(indexes=xbins,
data1=alias("y") if lasty is None else lasty,
data2=alias("y")
)
#if di == "x":
# plot.update(left=alias("edge"), bottom=alias("base"),
# height=alias("length"))
#else:
# plot.update(left=alias("base"), bottom=alias("left"),
# height=alias("length"))
# these define the perimeter of the polygon
x = np.zeros(4 * len(bins) - 3, np.float)
y = np.zeros(4 * len(bins) - 3, np.float)
x[0:2*len(bins)-1:2], x[1:2*len(bins)-1:2] = bins, bins[:-1]
x[2*len(bins)-1:] = x[1:2*len(bins)-1][::-1]
if bottom is None:
bottom = np.zeros(len(bins)-1, np.float)
y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = bottom, bottom
y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
if log:
# Setting a minimum of 0 results in problems for log plots
if density or weights is not None:
# For normed data, set to log base * minimum data value
# (gives 1 full tick-label unit for the lowest filled bin)
ndata = np.array(n)
minimum = (np.min(ndata[ndata > 0])) / logbase
else:
# For non-normed data, set the min to log base,
# again so that there is 1 full tick-label unit
# for the lowest bin
minimum = 1.0 / logbase
y[0], y[-1] = minimum, minimum
else:
minimum = np.min(bins)
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
# If fill kwarg is set, it will be passed to the patch collection,
# overriding this
xvals, yvals = [], []
if stacked:
# starting point for drawing polygon
y[0] = y[1]
# top of the previous polygon becomes the bottom
y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
# set the top of this polygon
y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = (m + bottom,
m + bottom)
if log:
y[y < minimum] = minimum
plot.fill.update({di:x, dd:y})
@xyzplot.decorate()
def histogram2d(plot, *args, **kwargs):
plot.update(kwargs.pop(KWS, {}), **kwargs)
(x, y,
bins, bin_range, normed, weights,
cmin, cmax) = plot.parseargs(args, "x", "y",
"bins", "range", "normed", "weights",
"cmin", "cmax",
bins=10, range=None, normed=False, weights=None,
cmin=None, cmax=None)
h, xedges, yedges = np.histogram2d(x, y, bins=bins, range=bin_range,
normed=normed, weights=weights)
if cmin is not None:
h[h < cmin] = None
if cmax is not None:
h[h > cmax] = None
X, Y = np.meshgrid(xedges, yedges)
plot.update(xedges=xedges, yedges=yedges, hist=h,
x=alias("xedges"), y=alias("yedges"),
X=X[:-1,:-1], Y=Y[:-1,:-1],
colors=h.T,
Z = h.T
)
plot.contour.update(colors=None)
plot.contourf.update(colors=None)
plot.imshow.update(extent=(xedges.min(),xedges.max(),yedges.min(),yedges.max()))
plot.goifgo()
histogram["_example_"] = ("histogram", None)
DataPlot.histogram = histogram
ImgPlot.histogram = histogram.derive(data=alias(lambda p: np.asarray(p["img"]).flatten()))
XYZPlot.histogram = histogram.derive(data=alias(lambda p: np.asarray(p["z"]).flatten()))
XYPlot.yhistogram2y = histogram.derive(data=alias("y"),
direction="y"
)
XYPlot.yhistogram2x = histogram.derive(data=alias("y"),
direction="x"
)
XYPlot.xhistogram2y = histogram.derive(data=alias("x"),
direction="x"
)
XYPlot.xhistogram2x = histogram.derive(data=alias("x"),
direction="x"
)
XYPlot.histogram2d = histogram2d
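# Hedged usage sketch (comments only; the exact call convention of smartplotlib
# plot factories is assumed here, not verified):
#   values = np.random.randn(1000)        # placeholder data
#   xy = histogram(data=values, bins=20)  # the factory builds an XYPlot
#   # the resulting XYPlot carries x (bin positions), y (counts) and the
#   # pre-configured bar/step/fillstep/fill_between parameters set in _makebinedstatplot.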
|
gpl-2.0
|
bikong2/scikit-learn
|
sklearn/feature_extraction/text.py
|
110
|
50157
|
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
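    # Hedged illustration (worked by hand): with ngram_range=(3, 3),
    # _char_wb_ngrams("jumpy fox") yields
    # [' ju', 'jum', 'ump', 'mpy', 'py ', ' fo', 'fox', 'ox ']
    # -- each word is padded with spaces, so n-grams never cross word boundaries.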
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items (strings
        or bytes) that are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
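# Hedged usage sketch (HashingVectorizer is stateless, so transform can be called
# without fitting; the toy documents and the `_example_hashing_usage` name are illustrative):
def _example_hashing_usage():
    hv = HashingVectorizer(n_features=2 ** 18, stop_words='english')
    X = hv.transform(['the pizza beer copyright', 'the coke burger'])
    # X is a scipy.sparse matrix of shape (2, 2 ** 18), l2-normalized by default
    return X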
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items (strings
        or bytes) that are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
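    Examples
    --------
    A minimal usage sketch (the toy corpus below is purely illustrative):
    >>> corpus = ['the cat sat', 'the cat sat on the mat']
    >>> vectorizer = CountVectorizer()
    >>> X = vectorizer.fit_transform(corpus)
    >>> len(vectorizer.vocabulary_)
    5
    >>> X.shape
    (2, 5)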
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
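        # Standard CSR construction from the (data, indices, indptr) triplet:
        # the column indices for document i live in j_indices[indptr[i]:indptr[i+1]],
        # each with a provisional count of 1; sum_duplicates() below merges
        # repeated (document, term) pairs into the final term counts.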
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
        X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
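    Examples
    --------
    A small worked sketch of the smoothed idf weight computed in ``fit``
    (the document frequencies below are illustrative)::
        import numpy as np
        n_samples, df = 4, np.array([4, 2, 1])  # df = number of documents containing each term
        idf = np.log((1.0 + n_samples) / (1.0 + df)) + 1.0
        # a term that appears in every document gets log(1) + 1 = 1.0 here,
        # so (as noted above) it is down-weighted but not discarded entirely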
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
            # in-place multiplication (X *= self._idf_diag) is not supported, so reassign
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or byte.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
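    Examples
    --------
    A minimal usage sketch (the toy corpus below is purely illustrative):
    >>> corpus = ['the cat sat', 'the dog sat', 'the dog barked']
    >>> vectorizer = TfidfVectorizer()
    >>> X = vectorizer.fit_transform(corpus)
    >>> X.shape
    (3, 5)
    >>> X[0].nnz  # three terms appear in the first document
    3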
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
|
bsd-3-clause
|
saifrahmed/bokeh
|
bokeh/cli/utils.py
|
42
|
8119
|
from __future__ import absolute_import, print_function
from collections import OrderedDict
from six.moves.urllib import request as urllib2
import io
import pandas as pd
from .. import charts
from . import help_messages as hm
def keep_source_input_sync(filepath, callback, start=0):
""" Monitor file at filepath checking for new lines (similar to
tail -f) and calls callback on every new line found.
Args:
filepath (str): path to the series data file (
i.e.: /source/to/my/data.csv)
        callback (callable): function to be called with a DataFrame
            created from the new lines found in the file at filepath,
            starting at byte start
start (int): specifies where to start reading from the file at
filepath.
Default: 0
Returns:
DataFrame created from data read from filepath
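        Example (illustrative; this call blocks, tailing the file until interrupted)::
            keep_source_input_sync('/source/to/my/data.csv',
                                   callback=lambda line: print(line))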
"""
if filepath is None:
msg = "No Input! Please specify --source_filename or --buffer t"
raise IOError(msg)
if filepath.lower().startswith('http'):
# Create a request for the given URL.
while True:
request = urllib2.Request(filepath)
data = get_data_from_url(request, start)
f = io.BytesIO(data)
f.seek(start)
line = f.readline() # See note below
if not line:
continue # No data, try again
callback(line)
start = len(data)
else:
f = open(filepath, 'r')
f.seek(start)
while True:
line = f.readline() # See note below
if not line:
continue # No data, try again
callback(line)
source = pd.read_csv(filepath)
return source
# Try to get the response. This will raise a urllib2.URLError if there is a
# problem (e.g., invalid URL).
# Reference:
# - http://stackoverflow.com/questions/5209087/python-seek-in-http-response-stream
# - http://stackoverflow.com/questions/1971240/python-seek-on-remote-file-using-http
def get_data_from_url(request, start=0, length=0):
""" Read from request after adding headers to retrieve data from byte
specified in start.
request (urllib2.Request): request object related to the data to read
start (int, optional): byte to start reading from.
Default: 0
length: length of the data range to read from start. If 0 it reads
until the end of the stream.
Default: 0
Returns:
String read from request
"""
# Add the header to specify the range to download.
if start and length:
request.add_header("Range", "bytes=%d-%d" % (start, start + length - 1))
elif start:
request.add_header("Range", "bytes=%s-" % start)
response = urllib2.urlopen(request)
# If a content-range header is present, partial retrieval worked.
if "content-range" in response.headers:
print("Partial retrieval successful.")
# The header contains the string 'bytes', followed by a space, then the
# range in the format 'start-end', followed by a slash and then the total
        # size of the page (or an asterisk if the total size is unknown). Let's get
# the range and total size from this.
_range, total = response.headers['content-range'].split(' ')[-1].split('/')
# Print a message giving the range information.
if total == '*':
print("Bytes %s of an unknown total were retrieved." % _range)
else:
print("Bytes %s of a total of %s were retrieved." % (_range, total))
# # No header, so partial retrieval was unsuccessful.
# else:
# print "Unable to use partial retrieval."
data = response.read()
return data
def parse_output_config(output):
"""Parse the output specification string and return the related chart
output attribute.
    Args:
        output (str): output specification string, following the convention
            used by the cli output option: <output_type>://<type_arg>
Valid values:
output_type: file or server
type_arg:
file_path if output_type is file
serve path if output_type is server
Returns:
dictionary containing the output arguments to pass to a chart object
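    Example (the path below is illustrative):
        >>> parse_output_config('file://output/charts.html')
        {'filename': 'output/charts.html'}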
"""
output_type, output_options = output.split('://')
if output_type == 'file':
return {'filename': output_options}
elif output_type == 'server':
# TODO: check if server configuration is as flexible as with plotting
# interface and add support for url/name if so.
out_opt = output_options.split("@")
attrnames = ['server', 'url', 'name']
        # unpack server output parameters in order to pass them to the plot
# creation function
        kws = dict((attrn, val) for attrn, val in zip(attrnames, out_opt))
return {'server': kws['server']}
else:
msg = "Unknown output type %s found. Please use: file|server"
print (msg % output_type)
return {}
def get_chart_params(title, output, show_legend=False):
"""Parse output type and output options and return related chart
    parameters. For example: returns filename if output_type is file,
    or server if output_type is server.
Args:
title (str): the title of your plot.
output (str): selected output. Follows the following convention:
<output_type>://<type_arg> where output_type can be
`file` (in that case type_arg specifies the file path) or
`server` (in that case type_arg specify the server name).
Returns:
dictionary containing the arguments to pass to a chart object
related to title and output options
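    Example (the title and path below are illustrative)::
        get_chart_params('My Chart', 'file://out.html', show_legend=True)
        # -> {'title': 'My Chart', 'legend': True, 'filename': 'out.html'}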
"""
params = {'title': title, 'legend': show_legend}
output_params = parse_output_config(output)
if output_params:
params.update(output_params)
return params
def get_data_series(series, source, indexes):
"""Generate an OrderedDict from the source series excluding index
and all series not specified in series.
Args:
series (list(str)): list of strings specifying the names of the
series to keep from source
source (DataFrame): pandas DataFrame with the data series to be
plotted
indexes (lst(str)): name of the series of source to be used as index.
Returns:
OrderedDict with the data series from source
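    Example (column names are illustrative)::
        get_data_series('price,volume', source, ['date'])
        # -> OrderedDict holding the 'price', 'volume' and 'date' columns of source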
"""
series = define_series(series, source, indexes)
# generate charts data
data_series = OrderedDict()
for i, colname in enumerate(series+indexes):
try:
data_series[colname] = source[colname]
except KeyError:
raise KeyError(hm.ERR_MSG_SERIES_NOT_FOUND % (colname, source.keys()))
return data_series
def define_series(series, source, indexes):
"""If series is empty returns source_columns excluding the column
where column == index. Otherwise returns the series.split(',')
Args:
series (str): string that contains the names of the
series to keep from source, separated by `,`
source (DataFrame): pandas DataFrame with the data series to be
plotted
indexes (lst(str)): name of the series of source to be used as index.
Returns:
list of the names (as str) of the series except index
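    Example (column names are illustrative)::
        define_series('', source, ['date'])              # -> all columns of source except 'date'
        define_series('price,volume', source, ['date'])  # -> ['price', 'volume']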
"""
if not series:
return [c for c in source.columns if c not in indexes]
else:
return series.split(',')
def get_charts_mapping():
"""Return a dict with chart classes names (lower case) as keys and
their related class as values.
Returns:
dict mapping chart classes names to chart classes
"""
mapping = {}
for (clsname, cls) in charts.__dict__.items():
try:
# TODO: We may need to restore the objects filtering
# when charts creators (or builders registration) is added
# to the charts API
mapping[clsname.lower()] = cls
except TypeError:
pass
return mapping
|
bsd-3-clause
|
quantopian/zipline
|
tests/utils/test_cache.py
|
3
|
2166
|
from unittest import TestCase
from pandas import Timestamp, Timedelta
from zipline.utils.cache import CachedObject, Expired, ExpiringCache
class CachedObjectTestCase(TestCase):
def test_cached_object(self):
expiry = Timestamp('2014')
before = expiry - Timedelta('1 minute')
after = expiry + Timedelta('1 minute')
obj = CachedObject(1, expiry)
self.assertEqual(obj.unwrap(before), 1)
self.assertEqual(obj.unwrap(expiry), 1) # Unwrap on expiry is allowed.
with self.assertRaises(Expired) as e:
obj.unwrap(after)
self.assertEqual(e.exception.args, (expiry,))
def test_expired(self):
always_expired = CachedObject.expired()
for dt in Timestamp.min, Timestamp.now(), Timestamp.max:
with self.assertRaises(Expired):
always_expired.unwrap(dt)
class ExpiringCacheTestCase(TestCase):
def test_expiring_cache(self):
expiry_1 = Timestamp('2014')
before_1 = expiry_1 - Timedelta('1 minute')
after_1 = expiry_1 + Timedelta('1 minute')
expiry_2 = Timestamp('2015')
after_2 = expiry_1 + Timedelta('1 minute')
expiry_3 = Timestamp('2016')
cache = ExpiringCache()
cache.set('foo', 1, expiry_1)
cache.set('bar', 2, expiry_2)
self.assertEqual(cache.get('foo', before_1), 1)
# Unwrap on expiry is allowed.
self.assertEqual(cache.get('foo', expiry_1), 1)
with self.assertRaises(KeyError) as e:
self.assertEqual(cache.get('foo', after_1))
self.assertEqual(e.exception.args, ('foo',))
# Should raise same KeyError after deletion.
with self.assertRaises(KeyError) as e:
self.assertEqual(cache.get('foo', before_1))
self.assertEqual(e.exception.args, ('foo',))
# Second value should still exist.
self.assertEqual(cache.get('bar', after_2), 2)
# Should raise similar KeyError on non-existent key.
with self.assertRaises(KeyError) as e:
self.assertEqual(cache.get('baz', expiry_3))
self.assertEqual(e.exception.args, ('baz',))
|
apache-2.0
|
bokeh/bokeh
|
examples/plotting/file/eclipse.py
|
1
|
2447
|
# Based on https://www.reddit.com/r/dataisbeautiful/comments/6qnkg0/google_search_interest_follows_the_path_of_the/
import pandas as pd
import shapefile as shp
from bokeh.models import ColorBar, ColumnDataSource, Label, LinearColorMapper
from bokeh.palettes import YlOrRd5
from bokeh.plotting import figure, show
from bokeh.sampledata.us_states import data
states = pd.DataFrame.from_dict(data, orient="index")
states.drop(["AK", "HI"], inplace=True)
trends = pd.read_csv("eclipse_data/trends.csv")
states.set_index("name", inplace=True)
trends.set_index("Region", inplace=True)
states["trend"] = trends["solar eclipse"]
upath17 = shp.Reader("eclipse_data/upath17")
(totality_path,) = upath17.shapes()
p = figure(width=1000, height=600, background_fill_color="#333344",
tools="", toolbar_location=None, x_axis_location=None, y_axis_location=None)
p.grid.grid_line_color = None
p.title.text = "Google Search Trends and the Path of Solar Eclipse, 21 August 2017"
p.title.align = "center"
p.title.text_font_size = "21px"
p.title.text_color = "#333344"
mapper = LinearColorMapper(palette=list(reversed(YlOrRd5)), low=0, high=100)
source = ColumnDataSource(data=dict(
state_xs=list(states.lons),
state_ys=list(states.lats),
trend=states.trend,
))
us = p.patches("state_xs", "state_ys",
fill_color=dict(field="trend", transform=mapper),
source=source,
line_color="#333344", line_width=1)
p.x_range.renderers = [us]
p.y_range.renderers = [us]
totality_x, totality_y = zip(*totality_path.points)
p.patch(totality_x, totality_y,
fill_color="black", fill_alpha=0.7,
line_color=None)
path = Label(
x=-76.3, y=31.4,
angle=-36.5, angle_units="deg",
text="Solar eclipse path of totality",
text_baseline="middle", text_font_size="11px", text_color="silver")
p.add_layout(path)
color_bar = ColorBar(
color_mapper=mapper,
location="bottom_left", orientation="horizontal",
title="Popularity of \"solar eclipse\" search term",
title_text_font_size="16px", title_text_font_style="bold",
title_text_color="lightgrey", major_label_text_color="lightgrey",
background_fill_alpha=0.0)
p.add_layout(color_bar)
notes = Label(
x=0, y=0, x_units="screen", y_units="screen",
x_offset=40, y_offset=20,
text="Source: Google Trends, NASA Scientific Visualization Studio",
level="overlay",
text_font_size="11px", text_color="gray")
p.add_layout(notes)
show(p)
|
bsd-3-clause
|
theoryno3/pylearn2
|
pylearn2/scripts/tests/test_print_monitor_cv.py
|
48
|
1927
|
"""
Test print_monitor_cv.py by training on a short TrainCV YAML file and
analyzing the output pickle.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.scripts import print_monitor_cv
from pylearn2.testing.skip import skip_if_no_sklearn
def test_print_monitor_cv():
"""Test print_monitor_cv.py."""
skip_if_no_sklearn()
handle, filename = tempfile.mkstemp()
trainer = yaml_parse.load(test_print_monitor_cv_yaml %
{'filename': filename})
trainer.main_loop()
# run print_monitor_cv.py main
print_monitor_cv.main(filename)
# run print_monitor_cv.py main with all=True
print_monitor_cv.main(filename, all=True)
# cleanup
os.remove(filename)
test_print_monitor_cv_yaml = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 8,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 2,
irange: 0.05,
},
],
nvis: 10,
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 5,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
save_path: %(filename)s,
}
"""
|
bsd-3-clause
|
rolando/theusual-kaggle-seeclickfix-ensemble
|
Bryan/train.py
|
2
|
12289
|
"""
Functions for training estimators, performing cross validation, and making predictions
"""
__author__ = 'Bryan Gregory'
__email__ = '[email protected]'
__date__ = '11-19-2013'
#Internal modules
import utils
#Start logger to record all info, warnings, and errors to Logs/logfile.log
log = utils.start_logging(__name__)
import ml_metrics
#External modules
import time
from datetime import datetime
from sklearn import (metrics, cross_validation, linear_model, preprocessing)
from sklearn.externals import joblib
import numpy as np
from scipy import sparse
from scipy.sparse import coo_matrix, hstack, vstack
#-----Run Cross Validation Steps-----#
def cross_validate(model, settings, dfTrn_Segment, dfTest_Segment):
#Combine the train and test feature matrices and create targets
mtxTrn, mtxTest, mtxTrnTarget, mtxTestTarget = combine_features(model, dfTrn_Segment, dfTest_Segment)
#Run CV
if settings['cv_method'] in ['march','april','list_split']:
cv_preds = cross_validate_temporal(mtxTrn,mtxTest,mtxTrnTarget.ravel(),mtxTestTarget.ravel(),model)
if settings['cv_method'] in ['kfold']:
cv_preds = cross_validate_kfold(mtxTrn,mtxTest,mtxTrnTarget.ravel(),mtxTestTarget.ravel(),model)
dfTest_Segment[model.target] = [x for x in cv_preds]
#-----Combine the train and test feature matrices and create targets-----#
def combine_features(model, dfTrn, dfTest):
#Create targets
mtxTrnTarget = dfTrn.ix[:,[model.target]].as_matrix()
mtxTestTarget = dfTest.ix[:,[model.target]].as_matrix()
#Combine train and test features
for feature in model.features:
if 'mtxTrn' in locals():
#if not the first feature in the list, then add the current feature
mtxTrn = hstack([mtxTrn, model.features[feature][0]])
mtxTest = hstack([mtxTest, model.features[feature][1]])
else:
#if the first feature in the list, then create the matrices
mtxTrn = model.features[feature][0]
mtxTest = model.features[feature][1]
return mtxTrn, mtxTest, mtxTrnTarget, mtxTestTarget
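#Note: combine_features assumes model.features is a dict mapping each feature name to a
#(train_matrix, test_matrix) pair of scipy sparse matrices, e.g. (names are illustrative only):
#    model.features = {'tfidf': (mtxTrnTfidf, mtxTestTfidf),
#                      'counts': (mtxTrnCounts, mtxTestCounts)}
#The pairs are hstacked column-wise into the final train/test design matrices above.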
#---Traditional K-Fold Cross Validation----#
def cross_validate_kfold(mtxTrn,mtxTarget,model,folds=5,SEED=42,test_size=.15,pred_fg='false'):
fold_scores = []
SEED = SEED * time.localtime().tm_sec
start_time = datetime.now()
log.info('K-Fold CV started at: %s' % (datetime.now().strftime('%m-%d-%y %H:%M')))
utils.line_break()
#If predictions are wanted, initialize the dict so that its length will match all records in the training set,
#even if not all records are predicted during the CV (randomness is a bitch)
if pred_fg == 'true':
cv_preds = {key[0]:[] for key in mtxTrn.getcol(0).toarray()}
for i in range(folds):
##For each fold, create a test set (test_cv) by randomly holding out test_size% of the data as CV set
train_cv, test_cv, y_target, y_true = \
cross_validation.train_test_split(mtxTrn, mtxTarget, test_size=test_size, random_state=i*SEED+1)
#If target variable has been transformed, transform y_true back to normal state for comparison to predictions
y_true = [np.exp(x)-1 for x in y_true]
        #if predictions are wanted, parse off the first column from train and test cv sets. First column contains ID
if pred_fg == 'true':
#TODO: create dense matrix copies for the clf's that only use dense matrices
train_cv = sparse.csr_matrix(train_cv)[:,1:]
            test_cv2 = sparse.csr_matrix(test_cv)  #keep the ID column for the cv_preds lookup below
test_cv = sparse.csr_matrix(test_cv)[:,1:]
#----------Hyperparameter optimization------#
try:
model.estimator.fit(train_cv, y_target)
preds = model.estimator.predict(test_cv)
except TypeError:
model.estimator.fit(train_cv.todense(), y_target)
            preds = model.estimator.predict(test_cv.todense())
#----------Post processing rules----------#
#If target variable has been transformed, transform predictions back to original state
preds = [np.exp(x)-1 for x in preds]
#Apply scalar
if model.postprocess_scalar != 1:
preds = [x*model.postprocess_scalar for x in preds]
#set <0 predictions to 0 if views or comments, set <1 predictions to 1 if votes
if model.target == 'num_votes':
preds = [1 if x < 1 else x for x in preds]
else:
preds = [0 if x < 0 else x for x in preds]
##For each fold, score the prediction by measuring the error using the chosen error metric
score = ml_metrics.rmsle(y_true, preds)
fold_scores += [score]
        log.info('RMSLE (fold %d/%d): %f' % (i + 1, folds, score))
##IF we want to record predictions, then for each fold add the predictions to the cv_preds dict for later output
if pred_fg == 'true':
for i in range(0,test_cv2.shape[0]):
if test_cv2.getcol(0).toarray()[i][0] in cv_preds.keys():
cv_preds[test_cv2.getcol(0).toarray()[i][0]] += [preds[i]]
else:
cv_preds[test_cv2.getcol(0).toarray()[i][0]] = [preds[i]]
##Now that folds are complete, calculate and print the results
finish_time = datetime.now()
    log.info('Prediction metrics: mean=%f, std dev=%f, min/max= %f/%f' %
             (np.mean(fold_scores), np.std(fold_scores), np.min(fold_scores), np.max(fold_scores)))
utils.line_break()
log.info('K-Fold CV completed at: %s. Total runtime: %s' % (datetime.now().strftime('%m-%d-%y %H:%M'),
str(finish_time-start_time)))
utils.line_break()
if pred_fg == 'true':
return cv_preds
#---Temporal cross validation---#
def cross_validate_temporal(mtxTrn,mtxTest,mtxTrnTarget,mtxTestTarget,model):
start_time = datetime.now()
log.info('Temporal CV started at: %s' % (datetime.now().strftime('%m-%d-%y %H:%M')))
utils.line_break()
train_cv = mtxTrn
test_cv = mtxTest
y_target = mtxTrnTarget
y_true = mtxTestTarget
#If target variable has been transformed, transform y_true back to normal state for comparison to predictions
y_true = [np.exp(x)-1 for x in y_true]
#--------Hyperparameter optimization---------#
#Make predictions
try:
model.estimator.fit(train_cv, y_target)
preds = model.estimator.predict(test_cv)
except TypeError:
model.estimator.fit(train_cv.todense(), y_target)
preds = model.estimator.predict(test_cv.todense())
#----------Post processing rules----------#
#If target variable has been transformed, transform predictions back to original state
preds = [np.exp(x)-1 for x in preds]
#Apply scalar
if model.postprocess_scalar != 1:
preds = [x*model.postprocess_scalar for x in preds]
#set <0 predictions to 0 if views or comments, set <1 predictions to 1 if votes
if model.target == 'num_votes':
preds = [1 if x < 1 else x for x in preds]
else:
preds = [0 if x < 0 else x for x in preds]
##score the prediction by measuring the error using the chosen error metric
score = ml_metrics.rmsle(y_true, preds)
finish_time = datetime.now()
    log.info('Error Measure: %f' % score)
    log.info('Prediction metrics: mean=%f, std dev=%f, min/max= %f/%f' %
             (np.mean(preds), np.std(preds), np.min(preds), np.max(preds)))
utils.line_break()
log.info('Temporal CV completed at: %s. Total runtime: %s' \
% (datetime.now().strftime('%m-%d-%y %H:%M'),str(finish_time-start_time)))
utils.line_break()
return preds
def cross_validate_using_benchmark(benchmark_name, dfTrn, mtxTrn,mtxTarget,model,folds=5,SEED=42,test_size=.15):
fold_scores = []
SEED = SEED * time.localtime().tm_sec
start_time = datetime.now()
log.info('Benchmark CV started at: %s' % (datetime.now().strftime('%m-%d-%y %H:%M')))
utils.line_break()
for i in range(folds):
#For each fold, create a test set (test_holdout) by randomly holding out X% of the data as CV set, where X is test_size (default .15)
train_cv, test_cv, y_target, y_true = cross_validation.train_test_split(mtxTrn, mtxTarget, test_size=test_size, random_state=SEED*i+10)
#If target variable has been transformed, transform y_true back to normal state for comparison to predictions
y_true = [np.exp(x)-1 for x in y_true]
#Calc benchmarks and use them to make a prediction
benchmark_preds = 0
if benchmark_name =='global_mean':
benchmark_preds = [13.899 for x in test_cv]
if benchmark_name =='all_ones':
#find user avg stars mean
benchmark_preds = [1 for x in test_cv]
if benchmark_name =='9999':
#find user avg stars mean
benchmark_preds = [9999 for x in test_cv]
log.info('Using benchmark %s:' % (benchmark_name))
#For this CV fold, measure the error
score = ml_metrics.rmsle(y_true, benchmark_preds)
#print score
fold_scores += [score]
log.info('RMSLE (fold %d/%d): %f' % (i + 1, folds, score))
##Now that folds are complete, calculate and print the results
finish_time = datetime.now()
    log.info('Prediction metrics: mean=%f, std dev=%f, min/max= %f/%f' %
             (np.mean(fold_scores), np.std(fold_scores), np.min(fold_scores), np.max(fold_scores)))
utils.line_break()
log.info('CV completed at: %s. Total runtime: %s' % (datetime.now().strftime('%m-%d-%y %H:%M'),
str(finish_time-start_time)))
utils.line_break()
def predict(mtxTrn,mtxTarget,mtxTest,dfTest,model):
start_time = datetime.now()
log.info('Predictions started at: %s' % (datetime.now().strftime('%m-%d-%y %H:%M')))
try:
#make predictions on test data and store them in the test data frame
model.estimator.fit(mtxTrn, mtxTarget)
dfTest[model.target] = [x for x in model.estimator.predict(mtxTest)]
except TypeError:
model.estimator.fit(mtxTrn.todense(), mtxTarget)
dfTest[model.target] = [x for x in model.estimator.predict(mtxTest.todense())]
#---------Post processing rules--------------#
#If target variable has been transformed, transform predictions back to original state
dfTest[model.target] = [np.exp(x) - 1 for x in dfTest[model.target]]
#Apply scalar
if model.postprocess_scalar != 1:
dfTest[model.target] = [x*model.postprocess_scalar for x in dfTest[model.target]]
#set <0 predictions to 0 if views or comments, set <1 predictions to 1 if votes
if model.target == 'num_votes':
dfTest[model.target] = [1 if x < 1 else x for x in dfTest[model.target]]
else:
dfTest[model.target] = [0 if x < 0 else x for x in dfTest[model.target]]
#print 'Coefs for',model.estimator_name,model.estimator.coef_
finish_time = datetime.now()
log.info('Prediction metrics: mean=%f, std dev=%f, min/max= %f/%f' %
(np.mean(dfTest[model.target]), np.std(dfTest[model.target]),np.min(dfTest[model.target]),
np.max(dfTest[model.target])))
log.info('Predictions completed at: %s. Total runtime: %s' % (datetime.now().strftime('%m-%d-%y %H:%M'),
str(finish_time-start_time)))
return dfTest
#---Calculate the variance between ground truth and the mean of the CV predictions.----#
#---Adds the average cv variance to the training dataframe for later analysis--------------------#
def calc_cv_preds_var(df, cv_preds):
df['cv_preds_var'] = ''
df['cv_preds_mean'] = ''
for key in cv_preds.keys():
df['cv_preds_var'][df.urlid == key] = abs(df[df.urlid == key].label.values[0] - np.mean(cv_preds[key]))
df['cv_preds_mean'][df.urlid == key] = np.mean(cv_preds[key])
|
bsd-3-clause
|