repo_name stringlengths 7-79 | path stringlengths 4-179 | copies stringlengths 1-3 | size stringlengths 4-6 | content stringlengths 959-798k | license stringclasses 15 values |
---|---|---|---|---|---|
bjodah/PubChemPy
|
pubchempy.py
|
1
|
44488
|
# -*- coding: utf-8 -*-
"""
PubChemPy
Python interface for the PubChem PUG REST service.
https://github.com/mcs07/PubChemPy
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import functools
import json
import logging
import os
import sys
import time
import warnings
try:
from urllib.error import HTTPError
from urllib.parse import quote, urlencode
from urllib.request import urlopen
except ImportError:
from urllib import urlencode
from urllib2 import quote, urlopen, HTTPError
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
__author__ = 'Matt Swain'
__email__ = '[email protected]'
__version__ = '1.0.3'
__license__ = 'MIT'
API_BASE = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug'
log = logging.getLogger('pubchempy')
log.addHandler(logging.NullHandler())
if sys.version_info[0] == 3:
text_types = str, bytes
else:
text_types = basestring,
def request(identifier, namespace='cid', domain='compound', operation=None, output='JSON', searchtype=None, **kwargs):
"""
Construct API request from parameters and return the response.
Full specification at http://pubchem.ncbi.nlm.nih.gov/pug_rest/PUG_REST.html
"""
# If identifier is a list, join with commas into string
if isinstance(identifier, int):
identifier = str(identifier)
if not isinstance(identifier, text_types):
identifier = ','.join(str(x) for x in identifier)
# Filter None values from kwargs
kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
# Build API URL
urlid, postdata = None, None
if namespace == 'sourceid':
identifier = identifier.replace('/', '.')
if namespace in ['listkey', 'formula', 'sourceid'] or (searchtype and namespace == 'cid') or domain == 'sources':
urlid = quote(identifier.encode('utf8'))
else:
postdata = urlencode([(namespace, identifier)]).encode('utf8')
comps = filter(None, [API_BASE, domain, searchtype, namespace, urlid, operation, output])
apiurl = '/'.join(comps)
if kwargs:
apiurl += '?%s' % urlencode(kwargs)
# Make request
try:
log.debug('Request URL: %s', apiurl)
log.debug('Request data: %s', postdata)
response = urlopen(apiurl, postdata)
return response
except HTTPError as e:
raise PubChemHTTPError(e)
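# A minimal usage sketch, assuming network access to the PubChem PUG REST
# service: request the molecular formula for CID 2244 and decode the JSON body.
#
#     response = request(2244, operation='property/MolecularFormula')
#     data = json.loads(response.read().decode())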
def get(identifier, namespace='cid', domain='compound', operation=None, output='JSON', searchtype=None, **kwargs):
"""Request wrapper that automatically handles async requests."""
if searchtype or namespace in ['formula']:
response = request(identifier, namespace, domain, None, 'JSON', searchtype, **kwargs).read()
status = json.loads(response.decode())
if 'Waiting' in status and 'ListKey' in status['Waiting']:
identifier = status['Waiting']['ListKey']
namespace = 'listkey'
while 'Waiting' in status and 'ListKey' in status['Waiting']:
time.sleep(2)
response = request(identifier, namespace, domain, operation, 'JSON', **kwargs).read()
status = json.loads(response.decode())
if not output == 'JSON':
response = request(identifier, namespace, domain, operation, output, searchtype, **kwargs).read()
else:
response = request(identifier, namespace, domain, operation, output, searchtype, **kwargs).read()
return response
def get_json(identifier, namespace='cid', domain='compound', operation=None, searchtype=None, **kwargs):
"""Request wrapper that automatically parses JSON response and supresses NotFoundError."""
try:
return json.loads(get(identifier, namespace, domain, operation, 'JSON', searchtype, **kwargs).decode())
except NotFoundError as e:
log.info(e)
return None
def get_compounds(identifier, namespace='cid', searchtype=None, as_dataframe=False, **kwargs):
"""Retrieve the specified compound records from PubChem.
:param identifier: The compound identifier to use as a search query.
:param namespace: (optional) The identifier type, one of cid, name, smiles, sdf, inchi, inchikey or formula.
:param searchtype: (optional) The advanced search type, one of substructure, superstructure or similarity.
:param as_dataframe: (optional) Automatically extract the :class:`~pubchempy.Compound` properties into a pandas
:class:`~pandas.DataFrame` and return that.
"""
results = get_json(identifier, namespace, searchtype=searchtype, **kwargs)
compounds = [Compound(r) for r in results['PC_Compounds']] if results else []
if as_dataframe:
return compounds_to_frame(compounds)
return compounds
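# A short sketch of typical use, assuming network access: search by name and
# read a few pre-computed properties from each returned Compound.
#
#     for compound in get_compounds('glucose', 'name'):
#         print(compound.cid, compound.molecular_formula, compound.isomeric_smiles)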
def get_substances(identifier, namespace='sid', as_dataframe=False, **kwargs):
"""Retrieve the specified substance records from PubChem.
:param identifier: The substance identifier to use as a search query.
:param namespace: (optional) The identifier type, one of sid, name or sourceid/<source name>.
:param as_dataframe: (optional) Automatically extract the :class:`~pubchempy.Substance` properties into a pandas
:class:`~pandas.DataFrame` and return that.
"""
results = get_json(identifier, namespace, 'substance', **kwargs)
substances = [Substance(r) for r in results['PC_Substances']] if results else []
if as_dataframe:
return substances_to_frame(substances)
return substances
def get_assays(identifier, namespace='aid', **kwargs):
"""Retrieve the specified assay records from PubChem.
:param identifier: The assay identifier to use as a search query.
:param namespace: (optional) The identifier type.
"""
results = get_json(identifier, namespace, 'assay', 'description', **kwargs)
return [Assay(r) for r in results['PC_AssayContainer']] if results else []
# Allows properties to optionally be specified as underscore_separated, consistent with Compound attributes
PROPERTY_MAP = {
'molecular_formula': 'MolecularFormula',
'molecular_weight': 'MolecularWeight',
'canonical_smiles': 'CanonicalSMILES',
'isomeric_smiles': 'IsomericSMILES',
'inchi': 'InChI',
'inchikey': 'InChIKey',
'iupac_name': 'IUPACName',
'xlogp': 'XLogP',
'exact_mass': 'ExactMass',
'monoisotopic_mass': 'MonoisotopicMass',
'tpsa': 'TPSA',
'complexity': 'Complexity',
'charge': 'Charge',
'h_bond_donor_count': 'HBondDonorCount',
'h_bond_acceptor_count': 'HBondAcceptorCount',
'rotatable_bond_count': 'RotatableBondCount',
'heavy_atom_count': 'HeavyAtomCount',
'isotope_atom_count': 'IsotopeAtomCount',
'atom_stereo_count': 'AtomStereoCount',
'defined_atom_stereo_count': 'DefinedAtomStereoCount',
'undefined_atom_stereo_count': 'UndefinedAtomStereoCount',
'bond_stereo_count': 'BondStereoCount',
'defined_bond_stereo_count': 'DefinedBondStereoCount',
'undefined_bond_stereo_count': 'UndefinedBondStereoCount',
'covalent_unit_count': 'CovalentUnitCount',
'volume_3d': 'Volume3D',
'conformer_rmsd_3d': 'ConformerModelRMSD3D',
'conformer_model_rmsd_3d': 'ConformerModelRMSD3D',
'x_steric_quadrupole_3d': 'XStericQuadrupole3D',
'y_steric_quadrupole_3d': 'YStericQuadrupole3D',
'z_steric_quadrupole_3d': 'ZStericQuadrupole3D',
'feature_count_3d': 'FeatureCount3D',
'feature_acceptor_count_3d': 'FeatureAcceptorCount3D',
'feature_donor_count_3d': 'FeatureDonorCount3D',
'feature_anion_count_3d': 'FeatureAnionCount3D',
'feature_cation_count_3d': 'FeatureCationCount3D',
'feature_ring_count_3d': 'FeatureRingCount3D',
'feature_hydrophobe_count_3d': 'FeatureHydrophobeCount3D',
'effective_rotor_count_3d': 'EffectiveRotorCount3D',
'conformer_count_3d': 'ConformerCount3D',
}
def get_properties(properties, identifier, namespace='cid', searchtype=None, as_dataframe=False, **kwargs):
"""Retrieve the specified properties from PubChem.
:param identifier: The compound, substance or assay identifier to use as a search query.
:param namespace: (optional) The identifier type.
:param searchtype: (optional) The advanced search type, one of substructure, superstructure or similarity.
:param as_dataframe: (optional) Automatically extract the properties into a pandas :class:`~pandas.DataFrame`.
"""
if isinstance(properties, text_types):
properties = properties.split(',')
properties = ','.join([PROPERTY_MAP.get(p, p) for p in properties])
properties = 'property/%s' % properties
results = get_json(identifier, namespace, 'compound', properties, searchtype=searchtype, **kwargs)
results = results['PropertyTable']['Properties'] if results else []
if as_dataframe:
import pandas as pd
return pd.DataFrame.from_records(results, index='CID')
return results
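# A hedged example of the underscore-separated keys accepted via PROPERTY_MAP,
# assuming network access; each result dict carries 'CID' plus the requested
# PubChem property names:
#
#     props = get_properties(['isomeric_smiles', 'xlogp'], 'aspirin', 'name')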
def get_synonyms(identifier, namespace='cid', domain='compound', searchtype=None, **kwargs):
results = get_json(identifier, namespace, domain, 'synonyms', searchtype=searchtype, **kwargs)
return results['InformationList']['Information'] if results else []
def get_cids(identifier, namespace='name', domain='compound', searchtype=None, **kwargs):
results = get_json(identifier, namespace, domain, 'cids', searchtype=searchtype, **kwargs)
if not results:
return []
elif 'IdentifierList' in results:
return results['IdentifierList']['CID']
elif 'InformationList' in results:
return results['InformationList']['Information']
def get_sids(identifier, namespace='cid', domain='compound', searchtype=None, **kwargs):
results = get_json(identifier, namespace, domain, 'sids', searchtype=searchtype, **kwargs)
if not results:
return []
elif 'IdentifierList' in results:
return results['IdentifierList']['SID']
elif 'InformationList' in results:
return results['InformationList']['Information']
def get_aids(identifier, namespace='cid', domain='compound', searchtype=None, **kwargs):
results = get_json(identifier, namespace, domain, 'aids', searchtype=searchtype, **kwargs)
if not results:
return []
elif 'IdentifierList' in results:
return results['IdentifierList']['AID']
elif 'InformationList' in results:
return results['InformationList']['Information']
def get_all_sources(domain='substance'):
"""Return a list of all current depositors of substances or assays."""
results = json.loads(get(domain, None, 'sources').decode())
return results['InformationList']['SourceName']
def download(outformat, path, identifier, namespace='cid', domain='compound', operation=None, searchtype=None,
overwrite=False, **kwargs):
"""Format can be XML, ASNT/B, JSON, SDF, CSV, PNG, TXT."""
response = get(identifier, namespace, domain, operation, outformat, searchtype, **kwargs)
if not overwrite and os.path.isfile(path):
raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % path)
with open(path, 'wb') as f:
f.write(response)
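# A minimal sketch, assuming network access and a writable working directory:
# save a 2D depiction of CID 2244 as a PNG, overwriting any existing file.
#
#     download('PNG', 'aspirin.png', 2244, overwrite=True)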
def memoized_property(fget):
"""Decorator to create memoized properties.
Used to cache :class:`~pubchempy.Compound` and :class:`~pubchempy.Substance` properties that require an additional
request.
"""
attr_name = '_{0}'.format(fget.__name__)
@functools.wraps(fget)
def fget_memoized(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fget(self))
return getattr(self, attr_name)
return property(fget_memoized)
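# Sketch of the caching behaviour with a throwaway class (hypothetical, for
# illustration only): the decorated getter runs once and its result is stored
# on the instance under a leading-underscore attribute.
#
#     class Cached(object):
#         @memoized_property
#         def value(self):
#             print('computing...')
#             return 42
#
#     c = Cached()
#     c.value  # prints 'computing...' then returns 42
#     c.value  # returns 42 from the cached '_value' attribute, no print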
def deprecated(message=None):
"""Decorator to mark functions as deprecated. A warning will be emitted when the function is used."""
def deco(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
warnings.warn(
message or 'Call to deprecated function {}'.format(func.__name__),
category=PubChemPyDeprecationWarning,
stacklevel=2
)
return func(*args, **kwargs)
return wrapped
return deco
class Atom(object):
"""Class to represent an atom in a :class:`~pubchempy.Compound`."""
def __init__(self, aid, element, x=None, y=None, z=None, charge=0):
"""Initialize with an atom ID, element symbol, coordinates and optional change.
:param int aid: Atom ID
:param string element: Element symbol.
:param float x: X coordinate.
:param float y: Y coordinate.
:param float z: (optional) Z coordinate.
:param int charge: (optional) Formal charge on atom.
"""
self.aid = aid
"""The atom ID within the owning Compound."""
self.element = element
"""The element symbol for this atom."""
self.x = x
"""The x coordinate for this atom."""
self.y = y
"""The y coordinate for this atom."""
self.z = z
"""The z coordinate for this atom. Will be ``None`` in 2D Compound records."""
self.charge = charge
"""The formal charge on this atom."""
def __repr__(self):
return 'Atom(%s, %s)' % (self.aid, self.element)
def __eq__(self, other):
return (isinstance(other, type(self)) and self.aid == other.aid and self.element == other.element and
self.x == other.x and self.y == other.y and self.z == other.z and self.charge == other.charge)
@deprecated('Dictionary style access to Atom attributes is deprecated')
def __getitem__(self, prop):
"""Allow dict-style access to attributes to ease transition from when atoms were dicts."""
if prop in {'element', 'x', 'y', 'z', 'charge'}:
return getattr(self, prop)
raise KeyError(prop)
@deprecated('Dictionary style access to Atom attributes is deprecated')
def __setitem__(self, prop, val):
"""Allow dict-style setting of attributes to ease transition from when atoms were dicts."""
setattr(self, prop, val)
@deprecated('Dictionary style access to Atom attributes is deprecated')
def __contains__(self, prop):
"""Allow dict-style checking of attributes to ease transition from when atoms were dicts."""
if prop in {'element', 'x', 'y', 'z', 'charge'}:
return getattr(self, prop) is not None
return False
def to_dict(self):
"""Return a dictionary containing Atom data."""
data = {'aid': self.aid, 'element': self.element}
for coord in {'x', 'y', 'z'}:
if getattr(self, coord) is not None:
data[coord] = getattr(self, coord)
if self.charge != 0:
data['charge'] = self.charge
return data
def set_coordinates(self, x, y, z=None):
"""Set all coordinate dimensions at once."""
self.x = x
self.y = y
self.z = z
@property
def coordinate_type(self):
"""Whether this atom has 2D or 3D coordinates."""
return '2d' if self.z is None else '3d'
class Bond(object):
"""Class to represent a bond between two atoms in a :class:`~pubchempy.Compound`."""
def __init__(self, aid1, aid2, order='single', style=None):
"""Initialize with begin and end atom IDs, bond order and bond style.
:param int aid1: Begin atom ID.
:param int aid2: End atom ID.
:param string order: Bond order.
"""
self.aid1 = aid1
"""ID of the begin atom of this bond."""
self.aid2 = aid2
"""ID of the end atom of this bond."""
self.order = order
"""Bond order."""
self.style = style
"""Bond style annotation."""
def __repr__(self):
return 'Bond(%s, %s, %s)' % (self.aid1, self.aid2, self.order)
def __eq__(self, other):
return (isinstance(other, type(self)) and self.aid1 == other.aid1 and self.aid2 == other.aid2 and
self.order == other.order and self.style == other.style)
@deprecated('Dictionary style access to Bond attributes is deprecated')
def __getitem__(self, prop):
"""Allow dict-style access to attributes to ease transition from when bonds were dicts."""
if prop in {'order', 'style'}:
return getattr(self, prop)
raise KeyError(prop)
@deprecated('Dictionary style access to Bond attributes is deprecated')
def __setitem__(self, prop, val):
"""Allow dict-style setting of attributes to ease transition from when bonds were dicts."""
setattr(self, prop, val)
@deprecated('Dictionary style access to Bond attributes is deprecated')
def __contains__(self, prop):
"""Allow dict-style checking of attributes to ease transition from when bonds were dicts."""
if prop in {'order', 'style'}:
return getattr(self, prop) is not None
return False
@deprecated('Dictionary style access to Bond attributes is deprecated')
def __delitem__(self, prop):
"""Allow dict-style deletion of attributes to ease transition from when bonds were dicts."""
if prop not in {'order', 'style'}:
raise KeyError(prop)
setattr(self, prop, None)
def to_dict(self):
"""Return a dictionary containing Bond data."""
data = {'aid1': self.aid1, 'aid2': self.aid2, 'order': self.order}
if self.style is not None:
data['style'] = self.style
return data
class Compound(object):
"""Corresponds to a single record from the PubChem Compound database.
The PubChem Compound database is constructed from the Substance database using a standardization and deduplication
process. Each Compound is uniquely identified by a CID.
"""
def __init__(self, record):
"""Initialize with a record dict from the PubChem PUG REST service.
For most users, the ``from_cid()`` class method is probably a better way of creating Compounds.
:param dict record: A compound record returned by the PubChem PUG REST service.
"""
self._record = None
self._atoms = {}
self._bonds = {}
self.record = record
@property
def record(self):
"""The raw compound record returned by the PubChem PUG REST service."""
return self._record
@record.setter
def record(self, record):
self._record = record
log.debug('Created %s' % self)
self._setup_atoms()
self._setup_bonds()
def _setup_atoms(self):
"""Derive Atom objects from the record."""
# Delete existing atoms
self._atoms = {}
# Create atoms
aids = self.record['atoms']['aid']
elements = self.record['atoms']['element']
if not len(aids) == len(elements):
raise ResponseParseError('Error parsing atom elements')
for aid, element in zip(aids, elements):
self._atoms[aid] = Atom(aid=aid, element=element)
# Add coordinates
if 'coords' in self.record:
coord_ids = self.record['coords'][0]['aid']
xs = self.record['coords'][0]['conformers'][0]['x']
ys = self.record['coords'][0]['conformers'][0]['y']
zs = self.record['coords'][0]['conformers'][0].get('z', [])
if not len(coord_ids) == len(xs) == len(ys) == len(self._atoms) or (zs and not len(zs) == len(coord_ids)):
raise ResponseParseError('Error parsing atom coordinates')
for aid, x, y, z in zip_longest(coord_ids, xs, ys, zs):
self._atoms[aid].set_coordinates(x, y, z)
# Add charges
if 'charge' in self.record['atoms']:
for charge in self.record['atoms']['charge']:
self._atoms[charge['aid']].charge = charge['value']
def _setup_bonds(self):
"""Derive Bond objects from the record."""
self._bonds = {}
if 'bonds' not in self.record:
return
# Create bonds
aid1s = self.record['bonds']['aid1']
aid2s = self.record['bonds']['aid2']
orders = self.record['bonds']['order']
if not len(aid1s) == len(aid2s) == len(orders):
raise ResponseParseError('Error parsing bonds')
for aid1, aid2, order in zip(aid1s, aid2s, orders):
self._bonds[frozenset((aid1, aid2))] = Bond(aid1=aid1, aid2=aid2, order=order)
# Add styles
if 'coords' in self.record and 'style' in self.record['coords'][0]['conformers'][0]:
aid1s = self.record['coords'][0]['conformers'][0]['style']['aid1']
aid2s = self.record['coords'][0]['conformers'][0]['style']['aid2']
styles = self.record['coords'][0]['conformers'][0]['style']['annotation']
for aid1, aid2, style in zip(aid1s, aid2s, styles):
self._bonds[frozenset((aid1, aid2))].style = style
@classmethod
def from_cid(cls, cid, **kwargs):
"""Retrieve the Compound record for the specified CID.
Usage::
c = Compound.from_cid(6819)
:param int cid: The PubChem Compound Identifier (CID).
"""
record = json.loads(request(cid, **kwargs).read().decode())['PC_Compounds'][0]
return cls(record)
def __repr__(self):
return 'Compound(%s)' % self.cid if self.cid else 'Compound()'
def __eq__(self, other):
return isinstance(other, type(self)) and self.record == other.record
def to_dict(self, properties=None):
"""Return a dictionary containing Compound data. Optionally specify a list of the desired properties.
synonyms, aids and sids are not included unless explicitly specified using the properties parameter. This is
because they each require an extra request.
"""
if not properties:
skip = {'aids', 'sids', 'synonyms'}
properties = [p for p in dir(Compound) if isinstance(getattr(Compound, p), property) and p not in skip]
return {p: [i.to_dict() for i in getattr(self, p)] if p in {'atoms', 'bonds'} else getattr(self, p) for p in properties}
def to_series(self, properties=None):
"""Return a pandas :class:`~pandas.Series` containing Compound data. Optionally specify a list of the desired
properties.
synonyms, aids and sids are not included unless explicitly specified using the properties parameter. This is
because they each require an extra request.
"""
import pandas as pd
return pd.Series(self.to_dict(properties))
@property
def cid(self):
"""The PubChem Compound Identifier (CID).
.. note::
When searching using a SMILES or InChI query that is not present in the PubChem Compound database, an
automatically generated record may be returned that contains properties that have been calculated on the
fly. These records will not have a CID property.
"""
if 'id' in self.record and 'id' in self.record['id'] and 'cid' in self.record['id']['id']:
return self.record['id']['id']['cid']
@property
def elements(self):
"""List of element symbols for atoms in this Compound."""
# Change to [a.element for a in self.atoms] ?
return self.record['atoms']['element']
@property
def atoms(self):
"""List of :class:`Atoms <pubchempy.Atom>` in this Compound."""
return sorted(self._atoms.values(), key=lambda x: x.aid)
@property
def bonds(self):
"""List of :class:`Bonds <pubchempy.Bond>` between :class:`Atoms <pubchempy.Atom>` in this Compound."""
return sorted(self._bonds.values(), key=lambda x: (x.aid1, x.aid2))
@memoized_property
def synonyms(self):
"""A ranked list of all the names associated with this Compound.
Requires an extra request. Result is cached.
"""
if self.cid:
results = get_json(self.cid, operation='synonyms')
return results['InformationList']['Information'][0]['Synonym'] if results else []
@memoized_property
def sids(self):
"""Requires an extra request. Result is cached."""
if self.cid:
results = get_json(self.cid, operation='sids')
return results['InformationList']['Information'][0]['SID'] if results else []
@memoized_property
def aids(self):
"""Requires an extra request. Result is cached."""
if self.cid:
results = get_json(self.cid, operation='aids')
return results['InformationList']['Information'][0]['AID'] if results else []
@property
def coordinate_type(self):
if 'twod' in self.record['coords'][0]['type']:
return '2d'
elif 'threed' in self.record['coords'][0]['type']:
return '3d'
@property
def charge(self):
"""Formal charge on this Compound."""
return self.record['charge'] if 'charge' in self.record else 0
@property
def molecular_formula(self):
"""Molecular formula."""
return _parse_prop({'label': 'Molecular Formula'}, self.record['props'])
@property
def molecular_weight(self):
"""Molecular Weight."""
return _parse_prop({'label': 'Molecular Weight'}, self.record['props'])
@property
def canonical_smiles(self):
"""Canonical SMILES, with no stereochemistry information."""
return _parse_prop({'label': 'SMILES', 'name': 'Canonical'}, self.record['props'])
@property
def isomeric_smiles(self):
"""Isomeric SMILES."""
return _parse_prop({'label': 'SMILES', 'name': 'Isomeric'}, self.record['props'])
@property
def inchi(self):
"""InChI string."""
return _parse_prop({'label': 'InChI', 'name': 'Standard'}, self.record['props'])
@property
def inchikey(self):
"""InChIKey."""
return _parse_prop({'label': 'InChIKey', 'name': 'Standard'}, self.record['props'])
@property
def iupac_name(self):
"""Preferred IUPAC name."""
# Note: Allowed, CAS-like Style, Preferred, Systematic, Traditional are available in full record
return _parse_prop({'label': 'IUPAC Name', 'name': 'Preferred'}, self.record['props'])
@property
def xlogp(self):
"""XLogP."""
return _parse_prop({'label': 'Log P'}, self.record['props'])
@property
def exact_mass(self):
"""Exact mass."""
return _parse_prop({'label': 'Mass', 'name': 'Exact'}, self.record['props'])
@property
def monoisotopic_mass(self):
"""Monoisotopic mass."""
return _parse_prop({'label': 'Weight', 'name': 'MonoIsotopic'}, self.record['props'])
@property
def tpsa(self):
"""Topological Polar Surface Area."""
return _parse_prop({'implementation': 'E_TPSA'}, self.record['props'])
@property
def complexity(self):
"""Complexity."""
return _parse_prop({'implementation': 'E_COMPLEXITY'}, self.record['props'])
@property
def h_bond_donor_count(self):
"""Hydrogen bond donor count."""
return _parse_prop({'implementation': 'E_NHDONORS'}, self.record['props'])
@property
def h_bond_acceptor_count(self):
"""Hydrogen bond acceptor count."""
return _parse_prop({'implementation': 'E_NHACCEPTORS'}, self.record['props'])
@property
def rotatable_bond_count(self):
"""Rotatable bond count."""
return _parse_prop({'implementation': 'E_NROTBONDS'}, self.record['props'])
@property
def fingerprint(self):
"""PubChem CACTVS fingerprint.
Each bit in the fingerprint represents the presence or absence of one of 881 chemical substructures.
More information at ftp://ftp.ncbi.nlm.nih.gov/pubchem/specifications/pubchem_fingerprints.txt
"""
return _parse_prop({'implementation': 'E_SCREEN'}, self.record['props'])
@property
def heavy_atom_count(self):
"""Heavy atom count."""
if 'count' in self.record and 'heavy_atom' in self.record['count']:
return self.record['count']['heavy_atom']
@property
def isotope_atom_count(self):
"""Isotope atom count."""
if 'count' in self.record and 'isotope_atom' in self.record['count']:
return self.record['count']['isotope_atom']
@property
def atom_stereo_count(self):
"""Atom stereocenter count."""
if 'count' in self.record and 'atom_chiral' in self.record['count']:
return self.record['count']['atom_chiral']
@property
def defined_atom_stereo_count(self):
"""Defined atom stereocenter count."""
if 'count' in self.record and 'atom_chiral_def' in self.record['count']:
return self.record['count']['atom_chiral_def']
@property
def undefined_atom_stereo_count(self):
"""Undefined atom stereocenter count."""
if 'count' in self.record and 'atom_chiral_undef' in self.record['count']:
return self.record['count']['atom_chiral_undef']
@property
def bond_stereo_count(self):
"""Bond stereocenter count."""
if 'count' in self.record and 'bond_chiral' in self.record['count']:
return self.record['count']['bond_chiral']
@property
def defined_bond_stereo_count(self):
"""Defined bond stereocenter count."""
if 'count' in self.record and 'bond_chiral_def' in self.record['count']:
return self.record['count']['bond_chiral_def']
@property
def undefined_bond_stereo_count(self):
"""Undefined bond stereocenter count."""
if 'count' in self.record and 'bond_chiral_undef' in self.record['count']:
return self.record['count']['bond_chiral_undef']
@property
def covalent_unit_count(self):
"""Covalently-bonded unit count."""
if 'count' in self.record and 'covalent_unit' in self.record['count']:
return self.record['count']['covalent_unit']
@property
def volume_3d(self):
conf = self.record['coords'][0]['conformers'][0]
if 'data' in conf:
return _parse_prop({'label': 'Shape', 'name': 'Volume'}, conf['data'])
@property
def multipoles_3d(self):
conf = self.record['coords'][0]['conformers'][0]
if 'data' in conf:
return _parse_prop({'label': 'Shape', 'name': 'Multipoles'}, conf['data'])
@property
def conformer_rmsd_3d(self):
coords = self.record['coords'][0]
if 'data' in coords:
return _parse_prop({'label': 'Conformer', 'name': 'RMSD'}, coords['data'])
@property
def effective_rotor_count_3d(self):
return _parse_prop({'label': 'Count', 'name': 'Effective Rotor'}, self.record['props'])
@property
def pharmacophore_features_3d(self):
return _parse_prop({'label': 'Features', 'name': 'Pharmacophore'}, self.record['props'])
@property
def mmff94_partial_charges_3d(self):
return _parse_prop({'label': 'Charge', 'name': 'MMFF94 Partial'}, self.record['props'])
@property
def mmff94_energy_3d(self):
conf = self.record['coords'][0]['conformers'][0]
if 'data' in conf:
return _parse_prop({'label': 'Energy', 'name': 'MMFF94 NoEstat'}, conf['data'])
@property
def conformer_id_3d(self):
conf = self.record['coords'][0]['conformers'][0]
if 'data' in conf:
return _parse_prop({'label': 'Conformer', 'name': 'ID'}, conf['data'])
@property
def shape_selfoverlap_3d(self):
conf = self.record['coords'][0]['conformers'][0]
if 'data' in conf:
return _parse_prop({'label': 'Shape', 'name': 'Self Overlap'}, conf['data'])
@property
def feature_selfoverlap_3d(self):
conf = self.record['coords'][0]['conformers'][0]
if 'data' in conf:
return _parse_prop({'label': 'Feature', 'name': 'Self Overlap'}, conf['data'])
@property
def shape_fingerprint_3d(self):
conf = self.record['coords'][0]['conformers'][0]
if 'data' in conf:
return _parse_prop({'label': 'Fingerprint', 'name': 'Shape'}, conf['data'])
def _parse_prop(search, proplist):
"""Extract property value from record using the given urn search filter."""
props = [i for i in proplist if all(item in i['urn'].items() for item in search.items())]
if len(props) > 0:
return props[0]['value'][list(props[0]['value'].keys())[0]]
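# Note on the urn filter above: each entry in record['props'] looks roughly like
# {'urn': {'label': ..., 'name': ..., ...}, 'value': {<dtype key>: <value>}}, so
# _parse_prop({'label': 'InChI', 'name': 'Standard'}, props) returns the value
# stored under whatever dtype key (e.g. 'sval' or 'fval') the matching prop uses.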
class Substance(object):
"""Corresponds to a single record from the PubChem Substance database.
The PubChem Substance database contains all chemical records deposited in PubChem in their most raw form, before
any significant processing is applied. As a result, it contains duplicates, mixtures, and some records that don't
make chemical sense. This means that Substance records contain fewer calculated properties, however they do have
additional information about the original source that deposited the record.
The PubChem Compound database is constructed from the Substance database using a standardization and deduplication
process. Hence each Compound may be derived from a number of different Substances.
"""
@classmethod
def from_sid(cls, sid):
"""Retrieve the Substance record for the specified SID.
:param int sid: The PubChem Substance Identifier (SID).
"""
record = json.loads(request(sid, 'sid', 'substance').read().decode())['PC_Substances'][0]
return cls(record)
def __init__(self, record):
self.record = record
"""A dictionary containing the full Substance record that all other properties are obtained from."""
def __repr__(self):
return 'Substance(%s)' % self.sid if self.sid else 'Substance()'
def __eq__(self, other):
return isinstance(other, type(self)) and self.record == other.record
def to_dict(self, properties=None):
"""Return a dictionary containing Substance data.
If the properties parameter is not specified, everything except cids and aids is included. This is because the
aids and cids properties each require an extra request to retrieve.
:param properties: (optional) A list of the desired properties.
"""
if not properties:
skip = {'deposited_compound', 'standardized_compound', 'cids', 'aids'}
properties = [p for p in dir(Substance) if isinstance(getattr(Substance, p), property) and p not in skip]
return {p: getattr(self, p) for p in properties}
def to_series(self, properties=None):
"""Return a pandas :class:`~pandas.Series` containing Substance data.
If the properties parameter is not specified, everything except cids and aids is included. This is because the
aids and cids properties each require an extra request to retrieve.
:param properties: (optional) A list of the desired properties.
"""
import pandas as pd
return pd.Series(self.to_dict(properties))
@property
def sid(self):
"""The PubChem Substance Idenfitier (SID)."""
return self.record['sid']['id']
@property
def synonyms(self):
"""A ranked list of all the names associated with this Substance."""
if 'synonyms' in self.record:
return self.record['synonyms']
@property
def source_name(self):
"""The name of the PubChem depositor that was the source of this Substance."""
return self.record['source']['db']['name']
@property
def source_id(self):
"""Unique ID for this Substance within those from the same PubChem depositor source."""
return self.record['source']['db']['source_id']['str']
@property
def standardized_cid(self):
"""The CID of the Compound that was produced when this Substance was standardized.
May not exist if this Substance was not standardizable.
"""
for c in self.record['compound']:
if c['id']['type'] == 'standardized':
return c['id']['id']['cid']
@memoized_property
def standardized_compound(self):
"""Return the :class:`~pubchempy.Compound` that was produced when this Substance was standardized.
Requires an extra request. Result is cached.
"""
for c in self.record['compound']:
if c['id']['type'] == 'standardized':
return Compound.from_cid(c['id']['id']['cid'])
@property
def deposited_compound(self):
"""Return a :class:`~pubchempy.Compound` produced from the unstandardized Substance record as deposited.
The resulting :class:`~pubchempy.Compound` will not have a ``cid`` and will be missing most properties.
"""
for c in self.record['compound']:
if c['id']['type'] == 'deposited':
return Compound(c)
@memoized_property
def cids(self):
"""A list of all CIDs for Compounds that were produced when this Substance was standardized.
Requires an extra request. Result is cached."""
results = get_json(self.sid, 'sid', 'substance', 'cids')
return results['InformationList']['Information'][0]['CID'] if results else []
@memoized_property
def aids(self):
"""A list of all AIDs for Assays associated with this Substance.
Requires an extra request. Result is cached."""
results = get_json(self.sid, 'sid', 'substance', 'aids')
return results['InformationList']['Information'][0]['AID'] if results else []
class Assay(object):
@classmethod
def from_aid(cls, aid):
"""Retrieve the Assay record for the specified AID.
:param int aid: The PubChem Assay Identifier (AID).
"""
record = json.loads(request(aid, 'aid', 'assay', 'description').read().decode())['PC_AssayContainer'][0]
return cls(record)
def __init__(self, record):
self.record = record
"""A dictionary containing the full Assay record that all other properties are obtained from."""
def __repr__(self):
return 'Assay(%s)' % self.aid if self.aid else 'Assay()'
def __eq__(self, other):
return isinstance(other, type(self)) and self.record == other.record
def to_dict(self, properties=None):
"""Return a dictionary containing Assay data.
If the properties parameter is not specified, everything is included.
:param properties: (optional) A list of the desired properties.
"""
if not properties:
properties = [p for p in dir(Assay) if isinstance(getattr(Assay, p), property)]
return {p: getattr(self, p) for p in properties}
@property
def aid(self):
"""The PubChem Substance Idenfitier (SID)."""
return self.record['assay']['descr']['aid']['id']
@property
def name(self):
"""The short assay name, used for display purposes."""
return self.record['assay']['descr']['name']
@property
def description(self):
"""Description"""
return self.record['assay']['descr']['description']
@property
def project_category(self):
"""A category to distinguish projects funded through MLSCN, MLPCN or from literature.
Possible values include mlscn, mlpcn, mlscn-ap, mlpcn-ap, literature-extracted, literature-author,
literature-publisher, rnaigi.
"""
if 'project_category' in self.record['assay']['descr']:
return self.record['assay']['descr']['project_category']
@property
def comments(self):
"""Comments and additional information."""
return [comment for comment in self.record['assay']['descr']['comment'] if comment]
@property
def results(self):
"""A list of dictionaries containing details of the results from this Assay."""
return self.record['assay']['descr']['results']
@property
def target(self):
"""A list of dictionaries containing details of the Assay targets."""
if 'target' in self.record['assay']['descr']:
return self.record['assay']['descr']['target']
@property
def revision(self):
"""Revision identifier for textual description."""
return self.record['assay']['descr']['revision']
@property
def aid_version(self):
"""Incremented when the original depositor updates the record."""
return self.record['assay']['descr']['aid']['version']
def compounds_to_frame(compounds, properties=None):
"""Construct a pandas :class:`~pandas.DataFrame` from a list of :class:`~pubchempy.Compound` objects.
Optionally specify a list of the desired :class:`~pubchempy.Compound` properties.
"""
import pandas as pd
if isinstance(compounds, Compound):
compounds = [compounds]
properties = set(properties) | set(['cid']) if properties else None
return pd.DataFrame.from_records([c.to_dict(properties) for c in compounds], index='cid')
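# A brief sketch, assuming pandas is installed and network access is available:
# build a CID-indexed DataFrame of selected properties for a search result.
#
#     df = compounds_to_frame(get_compounds('aspirin', 'name'),
#                             properties=['molecular_weight', 'xlogp'])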
def substances_to_frame(substances, properties=None):
"""Construct a pandas :class:`~pandas.DataFrame` from a list of :class:`~pubchempy.Substance` objects.
Optionally specify a list of the desired :class:`~pubchempy.Substance` properties.
"""
import pandas as pd
if isinstance(substances, Substance):
substances = [substances]
properties = set(properties) | set(['sid']) if properties else None
return pd.DataFrame.from_records([s.to_dict(properties) for s in substances], index='sid')
# def add_columns_to_frame(dataframe, id_col, id_namespace, add_cols):
# """"""
# # Existing dataframe with some identifier column
# # But consider what to do if the identifier column is an index?
# # What about having the Compound/Substance object as a column?
class PubChemPyDeprecationWarning(Warning):
"""Warning category for deprecated features."""
pass
class PubChemPyError(Exception):
"""Base class for all PubChemPy exceptions."""
pass
class ResponseParseError(PubChemPyError):
"""PubChem response is uninterpretable."""
pass
class PubChemHTTPError(PubChemPyError):
"""Generic error class to handle all HTTP error codes."""
def __init__(self, e):
self.code = e.code
self.msg = e.reason
try:
self.msg += ': %s' % json.loads(e.read().decode())['Fault']['Details'][0]
except (ValueError, IndexError, KeyError):
pass
if self.code == 400:
raise BadRequestError(self.msg)
elif self.code == 404:
raise NotFoundError(self.msg)
elif self.code == 405:
raise MethodNotAllowedError(self.msg)
elif self.code == 504:
raise TimeoutError(self.msg)
elif self.code == 501:
raise UnimplementedError(self.msg)
elif self.code == 500:
raise ServerError(self.msg)
def __str__(self):
return repr(self.msg)
class BadRequestError(PubChemHTTPError):
"""Request is improperly formed (syntax error in the URL, POST body, etc.)."""
def __init__(self, msg='Request is improperly formed'):
self.msg = msg
class NotFoundError(PubChemHTTPError):
"""The input record was not found (e.g. invalid CID)."""
def __init__(self, msg='The input record was not found'):
self.msg = msg
class MethodNotAllowedError(PubChemHTTPError):
"""Request not allowed (such as invalid MIME type in the HTTP Accept header)."""
def __init__(self, msg='Request not allowed'):
self.msg = msg
class TimeoutError(PubChemHTTPError):
"""The request timed out, from server overload or too broad a request.
See :ref:`Avoiding TimeoutError <avoiding_timeouterror>` for more information.
"""
def __init__(self, msg='The request timed out'):
self.msg = msg
class UnimplementedError(PubChemHTTPError):
"""The requested operation has not (yet) been implemented by the server."""
def __init__(self, msg='The requested operation has not been implemented'):
self.msg = msg
class ServerError(PubChemHTTPError):
"""Some problem on the server side (such as a database server down, etc.)."""
def __init__(self, msg='Some problem on the server side'):
self.msg = msg
if __name__ == '__main__':
print(__version__)
|
mit
|
llhe/tensorflow
|
tensorflow/examples/learn/hdf5_classification.py
|
60
|
2190
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, h5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
learn = tf.contrib.learn
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Note that we are saving and loading iris data in h5 format as a simple
# demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
score = metrics.accuracy_score(y_test, classifier.predict(x_test))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
karpathy/arxiv-sanity-preserver
|
analyze.py
|
2
|
3440
|
"""
Reads txt files of all papers and computes tfidf vectors for all papers.
Dumps results to file tfidf.p
"""
import os
import pickle
from random import shuffle, seed
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from utils import Config, safe_pickle_dump
seed(1337)
max_train = 5000 # max number of tfidf training documents (chosen randomly), for memory efficiency
max_features = 5000
# read database
db = pickle.load(open(Config.db_path, 'rb'))
# read all text files for all papers into memory
txt_paths, pids = [], []
n = 0
for pid,j in db.items():
n += 1
idvv = '%sv%d' % (j['_rawid'], j['_version'])
txt_path = os.path.join('data', 'txt', idvv) + '.pdf.txt'
if os.path.isfile(txt_path): # some pdfs don't translate to txt
with open(txt_path, 'r') as f:
txt = f.read()
if len(txt) > 1000 and len(txt) < 500000: # 500K is VERY conservative upper bound
txt_paths.append(txt_path) # todo later: maybe filter or something some of them
pids.append(idvv)
print("read %d/%d (%s) with %d chars" % (n, len(db), idvv, len(txt)))
else:
print("skipped %d/%d (%s) with %d chars: suspicious!" % (n, len(db), idvv, len(txt)))
else:
print("could not find %s in txt folder." % (txt_path, ))
print("in total read in %d text files out of %d db entries." % (len(txt_paths), len(db)))
# compute tfidf vectors with scikits
v = TfidfVectorizer(input='content',
encoding='utf-8', decode_error='replace', strip_accents='unicode',
lowercase=True, analyzer='word', stop_words='english',
token_pattern=r'(?u)\b[a-zA-Z_][a-zA-Z0-9_]+\b',
ngram_range=(1, 2), max_features = max_features,
norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=True,
max_df=1.0, min_df=1)
# create an iterator object to conserve memory
def make_corpus(paths):
for p in paths:
with open(p, 'r') as f:
txt = f.read()
yield txt
# train
train_txt_paths = list(txt_paths) # duplicate
shuffle(train_txt_paths) # shuffle
train_txt_paths = train_txt_paths[:min(len(train_txt_paths), max_train)] # crop
print("training on %d documents..." % (len(train_txt_paths), ))
train_corpus = make_corpus(train_txt_paths)
v.fit(train_corpus)
# transform
print("transforming %d documents..." % (len(txt_paths), ))
corpus = make_corpus(txt_paths)
X = v.transform(corpus)
print(v.vocabulary_)
print(X.shape)
# write full matrix out
out = {}
out['X'] = X # this one is heavy!
print("writing", Config.tfidf_path)
safe_pickle_dump(out, Config.tfidf_path)
# writing lighter metadata information into a separate (smaller) file
out = {}
out['vocab'] = v.vocabulary_
out['idf'] = v._tfidf.idf_
out['pids'] = pids # a full idvv string (id and version number)
out['ptoi'] = { x:i for i,x in enumerate(pids) } # pid to ix in X mapping
print("writing", Config.meta_path)
safe_pickle_dump(out, Config.meta_path)
print("precomputing nearest neighbor queries in batches...")
X = X.todense() # originally it's a sparse matrix
sim_dict = {}
batch_size = 200
for i in range(0,len(pids),batch_size):
i1 = min(len(pids), i+batch_size)
xquery = X[i:i1] # BxD
ds = -np.asarray(np.dot(X, xquery.T)) #NxD * DxB => NxB
IX = np.argsort(ds, axis=0) # NxB
for j in range(i1-i):
sim_dict[pids[i+j]] = [pids[q] for q in list(IX[:50,j])]
print('%d/%d...' % (i, len(pids)))
print("writing", Config.sim_path)
safe_pickle_dump(sim_dict, Config.sim_path)
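# A small sketch of reading the dumped files back (paths come from Config, as
# above); sim_dict maps each paper id to its nearest neighbours by tfidf dot
# product, with the paper itself typically ranked first:
#
#     import pickle
#     meta = pickle.load(open(Config.meta_path, 'rb'))
#     sim_dict = pickle.load(open(Config.sim_path, 'rb'))
#     print(sim_dict[meta['pids'][0]][:5])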
|
mit
|
LucasGandel/TubeTK
|
Base/Python/pyfsa/mapcl.py
|
8
|
3869
|
"""mapcl.py
Demonstrate how to evaluate a maximum a-posteriori
graph classifier using N-fold cross-validation.
"""
__license__ = "Apache License, Version 2.0 (see TubeTK)"
__author__ = "Roland Kwitt, Kitware Inc., 2013"
__email__ = "E-Mail: [email protected]"
__status__ = "Development"
# Graph handling
import networkx as nx
# Machine learning
from sklearn.metrics import accuracy_score
from sklearn.cross_validation import KFold
from sklearn.cross_validation import ShuffleSplit
from sklearn.linear_model import LogisticRegression
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing
from sklearn import svm
# Misc.
from optparse import OptionParser
import logging
import numpy as np
import scipy.sparse
import time
import sys
import os
# Fine-structure analysis
import core.fsa as fsa
import core.utils as utils
def main(argv=None):
if argv is None:
argv = sys.argv
# Setup vanilla CLI parsing and add custom arg(s).
parser = utils.setup_cli_parsing()
parser.add_option("",
"--mixComp",
help="number of GMM components.",
default=3,
type="int")
(options, args) = parser.parse_args()
# Setup logging
utils.setup_logging(options)
logger = logging.getLogger()
# Read graph file list and label file list
graph_file_list = utils.read_graph_file_list(options)
label_file_list = utils.read_label_file_list(options, graph_file_list)
# Read class info and grouping info
class_info = utils.read_class_info(options)
group_info = utils.read_group_info(options)
assert (group_info.shape[0] ==
len(class_info) ==
len(graph_file_list) ==
len(label_file_list))
# Zip lists together
data = zip(graph_file_list,
label_file_list,
class_info)
# Run fine-structure analysis
fsa_res = fsa.run_fsa(data,
options.radii,
options.recompute,
options.writeAs,
options.skip,
options.omitDegenerate)
data_mat = fsa_res['data_mat']
data_idx = fsa_res['data_idx']
# Create cross-validation folds (20% testing)
n_graphs = len(class_info)
cv = ShuffleSplit(n_graphs,
n_iter=options.cvRuns,
test_size=0.2,
random_state=0)
# Our unique class labels
label_set = np.unique(class_info)
if options.normalize:
logger.info("Running feature normalization ...")
scaler = preprocessing.StandardScaler(copy=False)
scaler.fit_transform(fsa_res['data_mat'])
scores = []
for cv_id, (trn, tst) in enumerate(cv):
models = []
for l in label_set:
l_idx = np.where(class_info == l)[0]
l_idx = np.asarray(l_idx).ravel()
l_trn = np.intersect1d(l_idx, trn)
pos = []
for i in l_trn:
tmp = np.where(fsa_res['data_idx']==i)[0]
pos.extend(list(tmp))
np_pos = np.asarray(pos)
gmm_model = fsa.estimate_gm(data_mat[np_pos,:], options.mixComp)
models.append(gmm_model)
predict = []
for i in tst:
pos = np.where(data_idx==i)[0]
map_idx = fsa.pp_gmm(data_mat[pos,:], models, argmax=True)
predict.append(label_set[map_idx])
# Score the MAP classifier
truth = [class_info[i] for i in tst]
score = accuracy_score(truth, predict)
print "yhat :", predict
print "gold :", truth
logger.info("Score (%.2d): %.2f" % (cv_id, 100*score))
scores.append(score)
utils.show_summary(scores)
if __name__ == "__main__":
main()
|
apache-2.0
|
navigator8972/vae_assoc
|
pyrbf_funcapprox.py
|
2
|
7983
|
import numpy as np
import matplotlib.pyplot as plt
class PyRBF_FunctionApproximator():
"""
an RBF function approximator for mono input and mono output...
note the features are a series of RBF basis functions and a constant offset term;
this is used so that the model can be easily initialized as a linear model...
"""
def __init__(self, rbf_type='gaussian', K=9, normalize=True):
self.K_ = K
self.type_ = rbf_type
self.rbf_parms_ = dict()
self.prepare_rbf_parameters()
self.theta_ = np.concatenate([np.zeros(self.K_), [0]])
self.normalize_rbf_ = normalize
self.upper_limit_ = None
self.lower_limit_ = None
#a function to map parameter theta to the linear constrainted space...
self.apply_lin_cons = None
return
def prepare_rbf_parameters(self):
#prepare rbf parameters
#gaussian
if self.type_ == 'gaussian':
self.rbf_parms_['mu'] = np.linspace(0.1, 0.9, self.K_)
self.rbf_parms_['sigma'] = 1. / self.K_
elif self.type_ == 'sigmoid':
#logistic curve, there might be other alternatives: e.g., erf, tanh
self.rbf_parms_['tau'] = self.K_ * 2
self.rbf_parms_['t0'] = np.linspace(1./self.K_, 1.0, self.K_)
else:
print 'Unknown RBF type'
return
def set_linear_equ_constraints(self, phases, target=None):
"""
this function allows setting linear equality constraints of the form
\Phi(phases)^T \theta == target
target is the zero vector if not specified...
"""
if target is None:
const_rhs = np.zeros(len(phases))
else:
const_rhs = target
if len(const_rhs) == len(phases):
#valid constraint
#evaluate features at constrainted phase points
self.cons_feats = self.get_features(phases)
self.cons_invmat = np.linalg.pinv(self.cons_feats.T.dot(self.cons_feats))
self.cons_offset = const_rhs
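#the projection below removes the constraint-violating component:
#theta_new = theta_old - Phi (Phi^T Phi)^+ (Phi^T theta_old - target),
#so that Phi^T theta_new == target (up to the accuracy of the pseudo-inverse)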
self.apply_lin_cons = lambda theta_old: theta_old - self.cons_feats.dot(
self.cons_invmat.dot(self.cons_feats.T.dot(theta_old) - self.cons_offset))
return
def set_theta(self, theta):
self.theta_ = theta
return
def set_const_offset(self, offset):
#the last theta...
self.theta_[-1] = offset
return
def rbf_gaussian_evaluation(self, z, mu, sigma):
res = np.exp(-(z-mu)**2/sigma)
return res
def rbf_sigmoid_evaluation(self, z, t0, tau):
res = 1. / (1 + np.exp(-tau*(z - t0)))
return res
def set_limit(self, upper_limit=None, lower_limit=None):
if upper_limit is not None:
self.upper_limit_ = upper_limit
if lower_limit is not None:
self.lower_limit_ = lower_limit
return
def get_features(self, z):
def get_features_internal(z_var):
if self.type_ == 'gaussian':
res = np.array([ self.rbf_gaussian_evaluation(z_var, self.rbf_parms_['mu'][i], self.rbf_parms_['sigma']) for i in range(self.K_)])
if self.normalize_rbf_:
res = res / np.sum(res)
return np.concatenate([res, [1]])
elif self.type_ == 'sigmoid':
res = np.array([ self.rbf_sigmoid_evaluation(z_var, self.rbf_parms_['t0'][i], self.rbf_parms_['tau']) for i in range(self.K_)])
return np.concatenate([res, [1]])
else:
print 'Unknown RBF type'
res = [get_features_internal(z_var) for z_var in z]
return np.array(res).T
def fit(self, z, y, replace_theta=False):
"""
z: a series of phase variables...
y: function evaluation
"""
features = self.get_features(z)
U, s, V = np.linalg.svd(features.T)
significant_dims = len(np.where(s>1e-6)[0])
inv_feats = V.T[:, 0:significant_dims].dot(np.diag(1./s[0:significant_dims])).dot(U[:, 0:significant_dims].T)
res_theta = inv_feats.dot(y)
if replace_theta:
# print 'use fit parameters'
self.theta_ = res_theta
return res_theta
def evaluate(self, z, theta=None, trunc_limit=True):
"""
evaluate with given phase variables
"""
features = self.get_features(z)
if theta is None:
#use model parameters
res = features.T.dot(self.theta_)
else:
#use given parameters
res = features.T.dot(theta)
#truncate with limit if desired
if trunc_limit:
#are limits valid?
if self.upper_limit_ is not None and self.lower_limit_ is not None:
if self.upper_limit_ > self.lower_limit_:
res[res > self.upper_limit_] = self.upper_limit_
res[res < self.lower_limit_] = self.lower_limit_
return res
def gaussian_sampling(self, theta=None, noise=None, n_samples=10):
'''
conducting local gaussian sampling with the given mean theta and noise
use the current theta if the mean theta is None
use unit noise if covariance matrix is not given
'''
if theta is None:
mean = self.theta_
else:
mean = theta
if noise is None:
covar = np.eye(len(self.theta_))
elif isinstance(noise, int) or isinstance(noise, float):
covar = np.eye(len(self.theta_)) * noise
else:
covar = noise
#make white gaussian because we might need to apply the linear constraints...
#<hyin/Feb-07th-2016> hmm, actually this is shifted noise, so remember not to apply that again
samples = np.random.multivariate_normal(mean, covar, n_samples)
if self.apply_lin_cons is None:
res = samples
else:
#apply linear constraint to apply the null-space perturbation
res = [self.apply_lin_cons(s) for s in samples]
return np.array(res)
def PyRBF_FuncApprox_Test():
#test
#fit sin
n_samples = 100
z = np.linspace(0.0, 1.0, 100)
y = np.cos(2*np.pi*z)
#feature parms
mu = np.arange(0.1, 1.0, 0.1)
sigma = 1./len(mu)
#model
rbf_mdl = PyRBF_FunctionApproximator(rbf_type='sigmoid', K=10, normalize=True)
#fit
res_theta = rbf_mdl.fit(z, y, True)
print 'fit parameters:', res_theta
y_hat = rbf_mdl.evaluate(z[n_samples/4:3*n_samples/4])
#draw the results
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hold(True)
ax.plot(z, y, linewidth=3.0)
ax.plot(z[n_samples/4:3*n_samples/4], y_hat, '.-', linewidth=3.0)
plt.draw()
#test for sampling and apply linear constrains
raw_input('Press ENTER to continue the test of random sampling')
rbf_mdl = PyRBF_FunctionApproximator(rbf_type='gaussian', K=10, normalize=True)
y = np.sin(np.linspace(0.0, np.pi, len(z)))
res_theta = rbf_mdl.fit(z, y, True)
print 'fit parameters:', res_theta
#anchoring the initial point...
rbf_mdl.set_linear_equ_constraints([z[0]], [y[0]])
#sampling...
init_fix_samples = rbf_mdl.gaussian_sampling()
init_fix_trajs = [rbf_mdl.evaluate(z, s) for s in init_fix_samples]
#anchoring both end points...
rbf_mdl.set_linear_equ_constraints([z[0], z[-1]], [y[0], y[-1]])
both_fix_samples = rbf_mdl.gaussian_sampling()
both_fix_trajs = [rbf_mdl.evaluate(z, s) for s in both_fix_samples]
print init_fix_samples, both_fix_samples
#show them...
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.hold(True)
for traj in init_fix_trajs:
ax1.plot(z, traj, linewidth=3.0)
plt.draw()
ax2 = fig.add_subplot(212)
ax2.hold(True)
for traj in both_fix_trajs:
ax2.plot(z, traj, linewidth=3.0)
plt.draw()
return
|
bsd-2-clause
|
wiheto/teneto
|
teneto/networkmeasures/temporal_closeness_centrality.py
|
1
|
2495
|
"""Calculates temporal closeness centrality"""
import numpy as np
from .shortest_temporal_path import shortest_temporal_path
def temporal_closeness_centrality(tnet=None, paths=None):
r"""
Returns temporal closeness centrality per node.
Temporal closeness centrality is the average of the reciprocals of a node's
average temporal path lengths to all other nodes.
Parameters
-----------
tnet : array, dict, object
Temporal network input with nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
Note
------
Only one input (tnet or paths) can be supplied to the function.
Returns
--------
:close: array
temporal closeness centrality (nodal measure)
Notes
-------
Temporal closeness centrality is defined in [Close-1]_:
.. math:: C^T_{i} = {{1} \over {N-1}} \sum_j {{1} \over {\tau_{ij}}}
Where :math:`\tau_{ij}` is the average temporal path length between nodes i and j.
Note, there are multiple different types of temporal distance measures
that can be used in temporal networks.
If a temporal network is used as input (i.e. not the paths), then teneto
uses :py:func:`.shortest_temporal_path` to calculates the shortest paths.
See :py:func:`.shortest_temporal_path` for more details.
.. [Close-1]
Pan, R. K., & Saramäki, J. (2011).
Path lengths, correlations, and centrality in temporal networks.
Physical Review E - Statistical, Nonlinear, and Soft Matter Physics, 84(1).
[`Link <https://doi.org/10.1103/PhysRevE.84.016105>`_]
"""
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
# Change for HDF5: paths.groupby([from,to])
# Then put preallocated in a pathmat 2D array
pathmat = np.zeros([paths[['from', 'to']].max().max() + 1,
paths[['from', 'to']].max().max() + 1,
paths[['t_start']].max().max() + 1]) * np.nan
pathmat[paths['from'].values, paths['to'].values,
paths['t_start'].values] = paths['temporal-distance']
closeness = np.nansum(1 / np.nanmean(pathmat, axis=2),
axis=1) / (pathmat.shape[1] - 1)
return closeness
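# A hedged usage sketch, assuming a binary undirected temporal network given as
# a (node, node, time) numpy array is an accepted input (per the 'bu'/'bd'
# nettype mentioned in the docstring); edge (0,1) exists at t=0 and edge (1,2)
# at t=2:
#
#     G = np.zeros((3, 3, 4))
#     G[0, 1, 0] = G[1, 0, 0] = 1
#     G[1, 2, 2] = G[2, 1, 2] = 1
#     close = temporal_closeness_centrality(tnet=G)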
|
gpl-3.0
|
trankmichael/scikit-learn
|
sklearn/feature_extraction/text.py
|
24
|
50103
|
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
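# Editor's illustration (hedged, based on the three helpers above):
# strip_accents_unicode("àéî") and strip_accents_ascii("àéî") both give "aei",
# while strip_tags("<b>hi</b>") gives " hi " because tags are replaced by
# spaces rather than removed outright.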
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
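    # Editor's illustration (hedged): with ngram_range=(1, 2) and tokens
    # ['a', 'b', 'c'], the loop above returns ['a', 'b', 'c', 'a b', 'b c'],
    # i.e. all unigrams followed by all bigrams of adjacent tokens.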
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
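    # Editor's illustration (hedged): with ngram_range=(2, 2) the word "hi" is
    # padded to " hi " above and yields the 2-grams " h", "hi" and "i ", so
    # character n-grams never straddle the space between two words.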
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
    binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
    dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
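# Hedged usage sketch for HashingVectorizer (editor's addition, kept as
# comments so nothing runs at import time). Because the transformer is
# stateless, transform can be called straight after construction:
#
#     corpus = ["the quick brown fox", "jumped over the lazy dog"]
#     hv = HashingVectorizer(n_features=2 ** 10, norm=None)
#     X = hv.transform(corpus)   # scipy.sparse matrix of shape (2, 1024)
#     # Collisions are possible but unlikely at this corpus size; increase
#     # n_features for real workloads.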
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
        Prune features that are non-zero in more documents than high or fewer
        documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
        X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
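# Hedged usage sketch for CountVectorizer (editor's addition, kept as comments
# so nothing runs at import time):
#
#     corpus = ["apple banana apple", "banana cherry"]
#     cv = CountVectorizer()
#     X = cv.fit_transform(corpus)            # sparse count matrix, shape (2, 3)
#     cv.get_feature_names()                  # ['apple', 'banana', 'cherry']
#     X.toarray()                             # [[2, 1, 0], [0, 1, 1]]
#     cv.transform(["cherry pie"]).toarray()  # [[0, 0, 1]]; unseen tokens dropped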
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
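# Editor's worked example (hedged) of the smoothed idf computed in fit above:
# with smooth_idf=True and two documents, a term present in both (df = 2) gets
# idf = log((2 + 1) / (2 + 1)) + 1 = 1.0, so its weight reduces to the raw tf,
# while a term present in only one document gets idf = log(3 / 2) + 1 ~= 1.41.
# With norm='l2', each row of the transformed matrix is then scaled to unit
# length.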
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents and count the occurrences of tokens and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
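# Hedged usage sketch for TfidfVectorizer (editor's addition, kept as comments
# so nothing runs at import time):
#
#     corpus = ["apple banana apple", "banana cherry"]
#     tv = TfidfVectorizer()
#     X = tv.fit_transform(corpus)        # l2-normalised tf-idf weights
#     tv.transform(["banana"]).toarray()  # reuses the fitted vocabulary and idf;
#                                         # only the 'banana' column is non-zero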
|
bsd-3-clause
|
meduz/scikit-learn
|
sklearn/tree/tests/test_export.py
|
33
|
9901
|
"""
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in, assert_equal, assert_raises
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
y_degraded = [1, 1, 1, 1, 1, 1]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=2,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
contents1 = export_graphviz(clf, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
contents1 = export_graphviz(clf, feature_names=["feature0", "feature1"],
out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
contents1 = export_graphviz(clf, class_names=["yes", "no"], out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
contents1 = export_graphviz(clf, filled=True, impurity=False,
proportion=True, special_characters=True,
rounded=True, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
contents1 = export_graphviz(clf, max_depth=0,
class_names=True, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
contents1 = export_graphviz(clf, max_depth=0, filled=True,
out_file=None, node_ids=True)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=2,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
contents1 = export_graphviz(clf, filled=True,
impurity=False, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[3.0, 1.0, 0.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n' \
'[3, 0, 0]]", fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n' \
'[0.0, 1.0, 0.5]]", fillcolor="#e5813986"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n' \
'[0, 1, 0]]", fillcolor="#e58139ff"] ;\n' \
'2 -> 3 ;\n' \
'4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'2 -> 4 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=2,
criterion="mse",
random_state=2)
clf.fit(X, y)
contents1 = export_graphviz(clf, filled=True, leaves_parallel=True,
out_file=None, rotate=True, rounded=True)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e5813980"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
# Test classifier with degraded learning set
clf = DecisionTreeClassifier(max_depth=3)
clf.fit(X, y_degraded)
contents1 = export_graphviz(clf, filled=True, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="gini = 0.0\\nsamples = 6\\nvalue = 6.0", ' \
'fillcolor="#e5813900"] ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
    for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
|
bsd-3-clause
|
betatim/BlackBox
|
skopt/tests/test_gp_opt.py
|
2
|
3168
|
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_less
import pytest
from skopt import gp_minimize
from skopt.benchmarks import bench1
from skopt.benchmarks import bench2
from skopt.benchmarks import bench3
from skopt.benchmarks import bench4
from skopt.benchmarks import branin
from skopt.utils import cook_estimator
def check_minimize(func, y_opt, bounds, acq_optimizer, acq_func,
margin, n_calls, n_random_starts=10):
r = gp_minimize(func, bounds, acq_optimizer=acq_optimizer,
acq_func=acq_func, n_random_starts=n_random_starts,
n_calls=n_calls, random_state=1,
noise=1e-10)
assert_less(r.fun, y_opt + margin)
SEARCH = ["sampling", "lbfgs"]
ACQUISITION = ["LCB", "EI"]
@pytest.mark.slow_test
@pytest.mark.parametrize("search", SEARCH)
@pytest.mark.parametrize("acq", ACQUISITION)
def test_gp_minimize_bench1(search, acq):
check_minimize(bench1, 0.,
[(-2.0, 2.0)], search, acq, 0.05, 20)
@pytest.mark.slow_test
@pytest.mark.parametrize("search", SEARCH)
@pytest.mark.parametrize("acq", ACQUISITION)
def test_gp_minimize_bench2(search, acq):
check_minimize(bench2, -5,
[(-6.0, 6.0)], search, acq, 0.05, 20)
@pytest.mark.slow_test
@pytest.mark.parametrize("search", SEARCH)
@pytest.mark.parametrize("acq", ACQUISITION)
def test_gp_minimize_bench3(search, acq):
check_minimize(bench3, -0.9,
[(-2.0, 2.0)], search, acq, 0.05, 20)
@pytest.mark.fast_test
@pytest.mark.parametrize("search", ["sampling"])
@pytest.mark.parametrize("acq", ACQUISITION)
def test_gp_minimize_bench4(search, acq):
# this particular random_state picks "2" twice so we can make an extra
# call to the objective without repeating options
check_minimize(bench4, 0.0,
[("-2", "-1", "0", "1", "2")], search, acq, 1.05, 6, 2)
@pytest.mark.fast_test
def test_n_jobs():
r_single = gp_minimize(bench3, [(-2.0, 2.0)], acq_optimizer="lbfgs",
acq_func="EI", n_calls=2, n_random_starts=1,
random_state=1, noise=1e-10)
r_double = gp_minimize(bench3, [(-2.0, 2.0)], acq_optimizer="lbfgs",
acq_func="EI", n_calls=2, n_random_starts=1,
random_state=1, noise=1e-10, n_jobs=2)
assert_array_equal(r_single.x_iters, r_double.x_iters)
@pytest.mark.fast_test
def test_gpr_default():
"""Smoke test that gp_minimize does not fail for default values."""
gp_minimize(branin, ((-5.0, 10.0), (0.0, 15.0)), n_random_starts=1,
n_calls=2)
@pytest.mark.fast_test
def test_use_given_estimator():
""" Test that gp_minimize does not use default estimator if one is passed
in explicitly. """
domain = [(1.0, 2.0), (3.0, 4.0)]
noise_correct = 1e+5
noise_fake = 1e-10
estimator = cook_estimator("GP", domain, noise=noise_correct)
res = gp_minimize(branin, domain, n_calls=1, n_random_starts=1,
base_estimator=estimator, noise=noise_fake)
assert res['models'][-1].noise == noise_correct
|
bsd-3-clause
|
belteshassar/cartopy
|
lib/cartopy/examples/always_circular_stereo.py
|
5
|
1313
|
__tags__ = ['Lines and polygons']
import matplotlib.path as mpath
import matplotlib.pyplot as plt
import numpy as np
import cartopy.crs as ccrs
import cartopy.feature
def main():
fig = plt.figure(figsize=[10, 5])
ax1 = plt.subplot(1, 2, 1, projection=ccrs.SouthPolarStereo())
ax2 = plt.subplot(1, 2, 2, projection=ccrs.SouthPolarStereo(),
sharex=ax1, sharey=ax1)
fig.subplots_adjust(bottom=0.05, top=0.95,
left=0.04, right=0.95, wspace=0.02)
# Limit the map to -60 degrees latitude and below.
ax1.set_extent([-180, 180, -90, -60], ccrs.PlateCarree())
ax1.add_feature(cartopy.feature.LAND)
ax1.add_feature(cartopy.feature.OCEAN)
ax1.gridlines()
ax2.gridlines()
ax2.add_feature(cartopy.feature.LAND)
ax2.add_feature(cartopy.feature.OCEAN)
# Compute a circle in axes coordinates, which we can use as a boundary
# for the map. We can pan/zoom as much as we like - the boundary will be
# permanently circular.
theta = np.linspace(0, 2*np.pi, 100)
center, radius = [0.5, 0.5], 0.5
verts = np.vstack([np.sin(theta), np.cos(theta)]).T
circle = mpath.Path(verts * radius + center)
ax2.set_boundary(circle, transform=ax2.transAxes)
plt.show()
if __name__ == '__main__':
main()
|
gpl-3.0
|
chunweiyuan/xarray
|
xarray/core/coordinates.py
|
1
|
11118
|
import collections.abc
from collections import OrderedDict
from contextlib import contextmanager
import pandas as pd
from . import formatting, indexing
from .merge import (
expand_and_merge_variables, merge_coords, merge_coords_for_inplace_math)
from .utils import Frozen, ReprObject, either_dict_or_kwargs
from .variable import Variable
# Used as the key corresponding to a DataArray's variable when converting
# arbitrary DataArray objects to datasets
_THIS_ARRAY = ReprObject('<this-array>')
class AbstractCoordinates(collections.abc.Mapping):
def __getitem__(self, key):
raise NotImplementedError
def __setitem__(self, key, value):
self.update({key: value})
@property
def indexes(self):
return self._data.indexes
@property
def variables(self):
raise NotImplementedError
def _update_coords(self, coords):
raise NotImplementedError
def __iter__(self):
# needs to be in the same order as the dataset variables
for k in self.variables:
if k in self._names:
yield k
def __len__(self):
return len(self._names)
def __contains__(self, key):
return key in self._names
def __repr__(self):
return formatting.coords_repr(self)
@property
def dims(self):
return self._data.dims
def to_index(self, ordered_dims=None):
"""Convert all index coordinates into a :py:class:`pandas.Index`.
Parameters
----------
ordered_dims : sequence, optional
Possibly reordered version of this object's dimensions indicating
the order in which dimensions should appear on the result.
Returns
-------
pandas.Index
Index subclass corresponding to the outer-product of all dimension
            coordinates. This will be a MultiIndex if this object has more
            than one dimension.
"""
if ordered_dims is None:
ordered_dims = self.dims
elif set(ordered_dims) != set(self.dims):
raise ValueError('ordered_dims must match dims, but does not: '
'{} vs {}'.format(ordered_dims, self.dims))
if len(ordered_dims) == 0:
raise ValueError('no valid index for a 0-dimensional object')
elif len(ordered_dims) == 1:
(dim,) = ordered_dims
return self._data.get_index(dim)
else:
indexes = [self._data.get_index(k) for k in ordered_dims]
names = list(ordered_dims)
return pd.MultiIndex.from_product(indexes, names=names)
def update(self, other):
other_vars = getattr(other, 'variables', other)
coords = merge_coords([self.variables, other_vars],
priority_arg=1, indexes=self.indexes)
self._update_coords(coords)
def _merge_raw(self, other):
"""For use with binary arithmetic."""
if other is None:
variables = OrderedDict(self.variables)
else:
# don't align because we already called xarray.align
variables = expand_and_merge_variables(
[self.variables, other.variables])
return variables
@contextmanager
def _merge_inplace(self, other):
"""For use with in-place binary arithmetic."""
if other is None:
yield
else:
# don't include indexes in priority_vars, because we didn't align
# first
priority_vars = OrderedDict(
kv for kv in self.variables.items() if kv[0] not in self.dims)
variables = merge_coords_for_inplace_math(
[self.variables, other.variables], priority_vars=priority_vars)
yield
self._update_coords(variables)
def merge(self, other):
"""Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
"""
from .dataset import Dataset
if other is None:
return self.to_dataset()
else:
other_vars = getattr(other, 'variables', other)
coords = expand_and_merge_variables([self.variables, other_vars])
return Dataset._from_vars_and_coord_names(coords, set(coords))
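# Hedged usage sketch (editor's addition, kept as comments): the merge rules
# documented above, assuming two DataArrays that share an 'x' index coordinate
# but disagree on a scalar (non-index) coordinate:
#
#     import xarray as xr
#     a = xr.DataArray([1, 2], dims='x', coords={'x': [10, 20], 'units': 'm'})
#     b = xr.DataArray([3, 4], dims='x', coords={'x': [10, 20], 'units': 'km'})
#     merged = a.coords.merge(b.coords)
#     # 'x' is kept because it is equal on both objects; the conflicting
#     # non-index coordinate 'units' is dropped, and `merged` is a Dataset.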
class DatasetCoordinates(AbstractCoordinates):
"""Dictionary like container for Dataset coordinates.
Essentially an immutable OrderedDict with keys given by the array's
dimensions and the values given by the corresponding xarray.Coordinate
objects.
"""
def __init__(self, dataset):
self._data = dataset
@property
def _names(self):
return self._data._coord_names
@property
def variables(self):
return Frozen(OrderedDict((k, v)
for k, v in self._data.variables.items()
if k in self._names))
def __getitem__(self, key):
if key in self._data.data_vars:
raise KeyError(key)
return self._data[key]
def to_dataset(self):
"""Convert these coordinates into a new Dataset
"""
return self._data._copy_listed(self._names)
def _update_coords(self, coords):
from .dataset import calculate_dimensions
variables = self._data._variables.copy()
variables.update(coords)
# check for inconsistent state *before* modifying anything in-place
dims = calculate_dimensions(variables)
new_coord_names = set(coords)
for dim, size in dims.items():
if dim in variables:
new_coord_names.add(dim)
self._data._variables = variables
self._data._coord_names.update(new_coord_names)
self._data._dims = dict(dims)
self._data._indexes = None
def __delitem__(self, key):
if key in self:
del self._data[key]
else:
raise KeyError(key)
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return [key for key in self._data._ipython_key_completions_()
if key not in self._data.data_vars]
class DataArrayCoordinates(AbstractCoordinates):
"""Dictionary like container for DataArray coordinates.
Essentially an OrderedDict with keys given by the array's
dimensions and the values given by corresponding DataArray objects.
"""
def __init__(self, dataarray):
self._data = dataarray
@property
def _names(self):
return set(self._data._coords)
def __getitem__(self, key):
return self._data._getitem_coord(key)
def _update_coords(self, coords):
from .dataset import calculate_dimensions
coords_plus_data = coords.copy()
coords_plus_data[_THIS_ARRAY] = self._data.variable
dims = calculate_dimensions(coords_plus_data)
if not set(dims) <= set(self.dims):
raise ValueError('cannot add coordinates with new dimensions to '
'a DataArray')
self._data._coords = coords
self._data._indexes = None
@property
def variables(self):
return Frozen(self._data._coords)
def _to_dataset(self, shallow_copy=True):
from .dataset import Dataset
coords = OrderedDict((k, v.copy(deep=False) if shallow_copy else v)
for k, v in self._data._coords.items())
return Dataset._from_vars_and_coord_names(coords, set(coords))
def to_dataset(self):
return self._to_dataset()
def __delitem__(self, key):
del self._data._coords[key]
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return self._data._ipython_key_completions_()
class LevelCoordinatesSource(object):
"""Iterator for MultiIndex level coordinates.
Used for attribute style lookup with AttrAccessMixin. Not returned directly
by any public methods.
"""
def __init__(self, data_object):
self._data = data_object
def __getitem__(self, key):
# not necessary -- everything here can already be found in coords.
raise KeyError
def __iter__(self):
return iter(self._data._level_coords)
def assert_coordinate_consistent(obj, coords):
""" Maeke sure the dimension coordinate of obj is
consistent with coords.
obj: DataArray or Dataset
coords: Dict-like of variables
"""
for k in obj.dims:
# make sure there are no conflict in dimension coordinates
if k in coords and k in obj.coords:
if not coords[k].equals(obj[k].variable):
raise IndexError(
'dimension coordinate {!r} conflicts between '
'indexed and indexing objects:\n{}\nvs.\n{}'
.format(k, obj[k], coords[k]))
def remap_label_indexers(obj, indexers=None, method=None, tolerance=None,
**indexers_kwargs):
"""
Remap **indexers from obj.coords.
    If an indexer is an instance of DataArray and it has a coordinate, then
    this coordinate will be attached to pos_indexers.
    Returns
    -------
    pos_indexers: Same type as indexers.
        np.ndarray, Variable or DataArray
    new_indexes: mapping of new dimensional coordinates.
"""
from .dataarray import DataArray
indexers = either_dict_or_kwargs(
indexers, indexers_kwargs, 'remap_label_indexers')
v_indexers = {k: v.variable.data if isinstance(v, DataArray) else v
for k, v in indexers.items()}
pos_indexers, new_indexes = indexing.remap_label_indexers(
obj, v_indexers, method=method, tolerance=tolerance
)
# attach indexer's coordinate to pos_indexers
for k, v in indexers.items():
if isinstance(v, Variable):
pos_indexers[k] = Variable(v.dims, pos_indexers[k])
elif isinstance(v, DataArray):
# drop coordinates found in indexers since .sel() already
# ensures alignments
coords = OrderedDict((k, v) for k, v in v._coords.items()
if k not in indexers)
pos_indexers[k] = DataArray(pos_indexers[k],
coords=coords, dims=v.dims)
return pos_indexers, new_indexes
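# Illustrative sketch (not part of the xarray source): remap_label_indexers
# backs label-based selection, so indexing with a DataArray keeps that
# indexer's own coordinates:
#     >>> import xarray as xr
#     >>> da = xr.DataArray([10, 20, 30], dims='x', coords={'x': [0, 1, 2]})
#     >>> idx = xr.DataArray([0, 2], dims='y', coords={'y': ['a', 'b']})
#     >>> da.sel(x=idx)   # result has dimension 'y' with coordinate ['a', 'b']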
|
apache-2.0
|
0x1001/BabyMonitor
|
app/plot_occurences.py
|
1
|
3464
|
def plot_day(day, file_path=None):
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
occurences = _get_occurences()
x = [day + datetime.timedelta(hours=i) for i in range(25)]
y = [0] * 25
for occur_time, occur_confidence in occurences:
if day.strftime("%d%m%y") == occur_time.date().strftime("%d%m%y"):
idx = int((occur_time - day).total_seconds() / (60 * 60))
y[idx] += 1
fig, ax = plt.subplots()
fig.set_size_inches(15, 12)
plt.bar(x, y, color='g', align='center', width=0.02)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.axis([x[0] - datetime.timedelta(seconds=30 * 60), x[-1] + datetime.timedelta(seconds=30 * 60), 0, max(y) + max(y) * 0.1])
plt.xticks(x)
    plt.ylabel('Occurrences')
plt.grid(True)
fig.autofmt_xdate()
if file_path is None:
plt.show()
else:
plt.savefig(file_path)
def plot_months(file_path=None):
import matplotlib.pyplot as plt
import datetime
import math
import matplotlib.dates as mdates
occurences = _get_occurences()
first = occurences[0][0].date()
last = datetime.datetime.now().date()
days = int(math.ceil(((last - first).total_seconds() / (60 * 60 * 24)))) + 1
x = [datetime.datetime.combine(first, datetime.time(hour=0, minute=0)) + datetime.timedelta(days=i) for i in range(days)]
y = [0] * days
for occur_time, occur_confidence in occurences:
idx = int(math.ceil((occur_time.date() - first).total_seconds() / (60 * 60 * 24)))
y[idx] += 1
fig, ax = plt.subplots()
fig.set_size_inches(15, 12)
plt.bar(x, y, color='r', align='center', width=0.5)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.xticks(x)
    plt.ylabel('Occurrences')
plt.grid(True)
plt.axis([x[0] - datetime.timedelta(days=1), x[-1] + datetime.timedelta(days=1), 0, max(y) + max(y) * 0.1])
fig.autofmt_xdate()
if file_path is None:
plt.show()
else:
plt.savefig(file_path)
def plot_confidence(file_path=None):
import matplotlib.pyplot as plt
occurences = _get_occurences()
x = range(0, 101)
y = [0] * 101
for occur_time, occur_confidence in occurences:
idx = int(occur_confidence)
y[idx] += 1
plt.bar(x, y, color='b', align='edge', width=1)
    plt.xlabel('Confidence')
    plt.ylabel('Occurrences')
plt.grid(True)
plt.axis([0, 101, 0, max(y) + max(y) * 0.1])
if file_path is None:
plt.show()
else:
plt.savefig(file_path)
def _get_occurences():
import storage
import config
s = storage.Storage(config.Config("../config.json"))
return s.get_occurences()
if __name__ == "__main__":
import argparse
import datetime
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--day", type=str, help="Plot daily occurences. Date format: 2015-05-22", dest="day")
parser.add_argument("-c", "--confidence", action='store_true', help="Plot confidence ranges", dest="confidence")
parser.add_argument("-o", "--output", type=str, help="Store graph instead of displaying it. Path to file.", dest="output")
args = parser.parse_args()
if args.day is not None:
day = datetime.datetime.strptime(args.day, "%Y-%m-%d")
plot_day(day, args.output)
elif args.confidence:
plot_confidence(args.output)
else:
plot_months(args.output)
|
gpl-2.0
|
abhishekgahlot/scikit-learn
|
sklearn/feature_selection/__init__.py
|
244
|
1088
|
"""
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
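# Illustrative usage sketch (not part of the scikit-learn source): a univariate
# filter keeps the k features scoring highest under a statistic such as chi2.
#     >>> from sklearn.datasets import load_iris
#     >>> from sklearn.feature_selection import SelectKBest, chi2
#     >>> iris = load_iris()
#     >>> X_new = SelectKBest(chi2, k=2).fit_transform(iris.data, iris.target)
#     >>> X_new.shape
#     (150, 2)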
|
bsd-3-clause
|
pombredanne/bokeh
|
tests/compat/listcollection.py
|
13
|
1637
|
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
def make_segments(x, y):
'''
Create list of line segments from x and y coordinates.
'''
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
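# Illustrative note: for x = [0, 1, 2] and y = [0, 1, 0], make_segments returns
# an array of shape (2, 2, 2), [[[0, 0], [1, 1]], [[1, 1], [2, 0]]], i.e. one
# (start, end) point pair per line segment.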
def colorline(x, y, colors=None, linewidth=3, alpha=1.0):
'''
Plot a line with segments.
    Optionally, specify segment colors and segment widths.
'''
# Make a list of colors cycling through the rgbcmyk series.
# You have several ways to input the colors:
# colors = ['r','g','b','c','y','m','k']
# colors = ['red','green','blue','cyan','yellow','magenta','black']
# colors = ['#ff0000', '#008000', '#0000ff', '#00bfbf', '#bfbf00', '#bf00bf', '#000000']
# colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0), (0.0, 0.75, 0.75, 1.0),
# (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0), (0.0, 0.0, 0.0, 1.0)]
colors = ['r', 'g', 'b', 'c', 'y', 'm', 'k']
widths = [5, 10, 20, 40, 20, 10, 5]
segments = make_segments(x, y)
lc = LineCollection(segments, colors=colors, linewidth=widths, alpha=alpha)
ax = plt.gca()
ax.add_collection(lc)
return lc
# Colored sine wave
x = np.linspace(0, 4 * np.pi, 100)
y = np.sin(x)
colorline(x, y)
plt.title("MPL support for ListCollection in Bokeh")
plt.xlim(x.min(), x.max())
plt.ylim(-1.0, 1.0)
output_file("listcollection.html", title="listcollection.py example")
show(mpl.to_bokeh())
|
bsd-3-clause
|
SimeonFritz/aima-python
|
submissions/Kinley/myNN.py
|
13
|
3383
|
from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Kinley import drugs
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
drugData = DataFrame()
drugData.data = []
targetData = []
alcohol = drugs.get_surveys('Alcohol Dependence')
#tobacco = drugs.get_surveys('Tobacco Use')
i=0
for survey in alcohol[0]['data']:
try:
youngUser = float(survey['Young']),
youngUserFloat = youngUser[0]
midUser = float(survey['Medium']),
midUserFloat = midUser[0]
oldUser = float(survey['Old']),
oldUserFloat = oldUser[0]
place = survey['State']
total = youngUserFloat + midUserFloat + oldUserFloat
targetData.append(total)
youngCertain = float(survey['Young CI']),
youngCertainFloat = youngCertain[0]
midCertain = float(survey['Medium CI']),
midCertainFloat = midCertain[0]
oldCertain = float(survey['Old CI']),
oldCertainFloat = oldCertain[0]
drugData.data.append([youngCertainFloat, midCertainFloat, oldCertainFloat])
i = i + 1
except:
traceback.print_exc()
drugData.feature_names = [
'Young CI',
'Medium CI',
'Old CI',
]
drugData.target = []
def drugTarget(number):
if number > 100.0:
return 1
return 0
for pre in targetData:
# choose the target
tt = drugTarget(pre)
drugData.target.append(tt)
drugData.target_names = [
    'States <= 100k alcoholics',  # class 0: total rate not above 100
    'States > 100k alcoholics',   # class 1: total rate above 100
]
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
# hidden_layer_sizes = (100,),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
drugScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(drugData.data)
drugScaled.data = scaleGrid(drugData.data)
drugScaled.feature_names = drugData.feature_names
drugScaled.target = drugData.target
drugScaled.target_names = drugData.target_names
Examples = {
'drugDefault': {
'frame': drugData,
},
'drugSGD': {
'frame': drugData,
'mlpc': mlpc
},
'drugScaled': {
'frame': drugScaled,
},
}
|
mit
|
stylianos-kampakis/scikit-learn
|
doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py
|
256
|
2406
|
"""Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
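    # One possible way to complete the TASKs above (illustrative sketch only;
    # the reference solution may differ): a TfidfVectorizer that drops very
    # rare and very frequent tokens, chained with a LinearSVC, grid-searched
    # over unigrams vs. unigrams+bigrams.
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    # mean cross-validated score of each explored parameter set
    for params, mean_score, scores in grid_search.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params))
    y_predicted = grid_search.predict(docs_test)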
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
|
bsd-3-clause
|
jseabold/scikit-learn
|
examples/gaussian_process/plot_gpr_noisy_targets.py
|
45
|
3680
|
"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``alpha`` is applied as a Tikhonov
regularization of the assumed covariance between the training points.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
#         Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from matplotlib import pyplot as pl
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
# ----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
# ----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Instantiate a Gaussian Process model
gp = GaussianProcessRegressor(kernel=kernel, alpha=(dy / y) ** 2,
n_restarts_optimizer=10)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
|
bsd-3-clause
|
richrr/scripts
|
python/check_barcodes_hack_read2_only.py
|
1
|
22616
|
import os
import sys
from utils import *
import operator
from time import localtime, strftime
import argparse
import re
import os.path
from subprocess import Popen, PIPE
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
import math
#usage:
# for using only read 2, uncomment (A-E) and comment lines directly above them if present (except for D and E)
# may 30 2015, does this mean: # for using only read 2, uncomment (A-C) and comment lines directly above them if present, and comment D and E
#python ~/scripts/python/check_barcodes_hack_read2_only.py -i ../../../../../SampleSheet_w_groups.csv -d , -o barcode_stats_read2_q30.csv -l its-log-read2_q30.txt -x ./Intermed_fastq_files/read2_q30/ -y ./Intermed_info_files/read2_q30/ -z ./Trimmed_fastq_files/read2_q30/
def draw_histogram_adpaters_trimed_seqs(infile):
lines = read_file(infile)
counter = flag = allow_error_flag = leng_freq_flag = 0
adapter_trimmed_dict = defaultdict(list) # keeps track of how many times the adapter was cut without concern of length of reads ----(1)
allowed_error_dict = dict()
leng_freq_dict = defaultdict(int) # keeps track of how many reads were cut along with their size ------(2)
leng_freq_per_adap_dict = defaultdict(int)
adaptername = ''
for l in lines:
l = l.strip()
#print l
if not l:
counter = allow_error_flag = leng_freq_flag = 0
continue
counter += 1
if re.search('Adapter.*length.*was trimmed.*times', l) != None:
ad, adaptername_, seq, l, len, w, t, freq, times = l.split(' ') #----(1)
adaptername = adaptername_.replace("'", "")
#print l, adaptername, seq, len, freq
adapter_trimmed_dict[adaptername].append(freq) #because of defaultdict(list), When each key is encountered for the first time, it is not already in the mapping; so an entry is automatically created using the default_factory function which returns an empty list.
counter = 0
if 'No. of allowed errors:' in l:
counter = 0
allow_error_flag = 1
if allow_error_flag == 1 and counter == 1 and re.search('^0-19 bp: ', l) != None:
#print l
allowed_error_dict[adaptername] = l
counter = 0
allow_error_flag = 0
#sys.exit(0)
if 'length count expect max.err error counts' in l:
counter = 0
leng_freq_flag = 1
if leng_freq_flag == 1 and counter >= 1:
bases_removed, numb_reads, expec, max_err, err_counts = l.split('\t') #----(2)
#print bases_removed, numb_reads # err_counts are space delimited, others are tab-delim
leng_freq_dict[int(bases_removed)] += int(numb_reads)
leng_freq_per_adap_dict[adaptername, int(bases_removed)] += int(numb_reads)
#print allowed_error_dict , '\n', leng_freq_dict , '\n' , leng_freq_per_adap_dict
all_adaps_sum = numb_non_zero_values = numb_zero_values = avg = 0
for k,v in adapter_trimmed_dict.items():
l = [int(i) for i in v] # convert entire list to int
suml = sum(l) # sum of list
all_adaps_sum += suml
numb_non_zero_values = sum(x > 0 for x in l) # number of non zero elements
numb_zero_values = sum(1 for x in l if x == 0) # number of zero elements
#print k, l, suml, numb_non_zero_values, numb_zero_values
avg = 0
if numb_non_zero_values != 0:
avg = suml/numb_non_zero_values
        text ='Adapter: %s trimmed an average of %d reads from %d samples, sample counted if at least one of \n its read was trimmed for the adapter, from total %d samples, allowed error %s' %(k, avg, numb_non_zero_values, numb_non_zero_values+numb_zero_values, allowed_error_dict[k])
mydict = {x[1]:y for x,y in leng_freq_per_adap_dict.iteritems() if x[0]==k}
#print text, '\n', mydict
if mydict:
for t in ['scatter', 'line']:
draw_hist(t , k, mydict, k, text)
avg = all_adaps_sum/numb_non_zero_values
    text ='All Adapter: trimmed an average of %d reads from %d samples, sample counted if at least one of \n its read was trimmed for the adapter, from total %d samples, allowed error ' %(avg, numb_non_zero_values, numb_non_zero_values+numb_zero_values)
for a,e in allowed_error_dict.items():
text += '%s %s \n' %(a,e)
for t in ['scatter', 'line']:
draw_hist(t , 'all_adapters', leng_freq_dict, 'all_adapters', text, -0.12)
sys.exit(0)
def draw_hist(fig_type, fig_name, counted_data, fig_spec, text, ypos=-0.03):
k, v = counted_data.keys(), counted_data.values() # the orders are unchanged
if fig_type == 'line':
plt.plot(k,v)
if fig_type == 'scatter':
plt.scatter(k,v)
plt.yscale('log')
# to draw histogram
#plt.hist(counted_data.keys(), weights=counted_data.values(), bins=range(max(counted_data) + 10), log=True
plt.title('# of trimmed bases vs. # of reads ' + fig_spec)
plt.xlabel('Number of bases removed by trimming')
plt.ylabel('Number of reads (log scale)')
plt.figtext(0,ypos,text, fontsize='x-small')
plt.savefig(fig_name+fig_type+'.png', bbox_inches='tight') #replace with .pdf
plt.clf()
def grep_string(string, f):
cmd = 'grep -c "^%s" %s' %(string,f) # pattern
output_start = Popen(cmd, shell=True, stdout=PIPE).communicate()[0] # grep -c 'pattern' filename
#print cmd , '\n', output_start
cmd = 'grep -c "%s$" %s' %(string,f) # pattern
output_end = Popen(cmd, shell=True, stdout=PIPE).communicate()[0] # grep -c 'pattern' filename
#print cmd , '\n', output_end
return output_start, output_end
def grep_string_contains(string, f):
cmd = 'grep -c "%s" %s' %(string,f) # pattern
output = Popen(cmd, shell=True, stdout=PIPE).communicate()[0] # grep -c 'pattern' filename
#print cmd , '\n', output
return output
def grep_two_patterns(start, end, f):
cmd = 'egrep -c "^%s.+%s" %s' %(start, end,f) # pattern
output = Popen(cmd, shell=True, stdout=PIPE).communicate()[0] # grep -c 'pattern' filename
#print cmd , '\n', output
'''
cmd = 'egrep -c "^%s.+%s$" %s' %(start, end,f) # pattern
output = Popen(cmd, shell=True, stdout=PIPE).communicate()[0] # grep -c 'pattern' filename
print cmd , '\n', output
'''
return output
def grep_two_patterns_negate_first(start, end, f):
cmd = 'grep -v "^%s" %s | grep -c "%s"' %(start, f, end) # pattern
output = Popen(cmd, shell=True, stdout=PIPE).communicate()[0] # grep -c 'pattern' filename
#print cmd , '\n', output
return output
def grep_two_patterns_negate_second(start, end, f):
cmd = 'grep "^%s" %s | grep -v -c "%s"' %(start, f, end) # pattern
output = Popen(cmd, shell=True, stdout=PIPE).communicate()[0] # grep -c 'pattern' filename
#print cmd , '\n', output
return output
def grep_two_patterns_negate_both(start, end, f):
cmd = 'grep -A 1 "^@M" %s | grep -v "^%s" | grep -v "^@" | grep -v "\-\-" | grep -v -c "%s"' %(f, start, end) # pattern
output = Popen(cmd, shell=True, stdout=PIPE).communicate()[0] # grep -c 'pattern' filename
#print cmd , '\n', output
return output
def main(args):
parser = argparse.ArgumentParser(description='Check if the barcodes are present in the sample')
parser.add_argument('-i', '--infile') # file containing the sample name and the barcode string
parser.add_argument('-o', '--outfile', default="out.txt") # output filename
parser.add_argument('-l', '--logfile', default="ITS-log-file.txt") # output filename
parser.add_argument('-p', '--pairedend', action='store_true', default=False) # paired-end data, default single end
parser.add_argument('-f', '--fasta', action='store_true', default=False) # files in fasta format, default fastq
parser.add_argument('-q', '--quality', default=30, type=int) # Phred quality threshold of seq.
parser.add_argument('-x', '--intermedfastq', default='./Intermed_fastq_files/') # dir name for intermediate fastq files
parser.add_argument('-y', '--intermedinfo', default='./Intermed_info_files/') # dir name for intermediate info files
parser.add_argument('-z', '--trimfastq', default='./Trimmed_fastq_files/') # prefix for trimmed files
#parser.add_argument('-g', '--groupsfile') # file containing the group where each sample belongs
#parser.add_argument('-c', '--insertcolumn', default=99999, type=int) # 0-index based column where the row_sums is to be printed
parser.add_argument('-d', '--delimiter', default='\t') # delimiter for file
parser.add_argument('-m', '--histogram', action='store_true', default=False) # draw histogram for each adapter using infile
args = parser.parse_args()
if len(sys.argv)==1 :
parser.print_help()
        sys.exit('\nat least one argument required\n')
infile = args.infile
outpfile = args.outfile
filetype = '.fasta' if args.fasta else '.fastq'
logfile = args.logfile
#end_col = args.endcolum
#insertcolumn = args.insertcolumn
delim = args.delimiter
if args.histogram:
draw_histogram_adpaters_trimed_seqs(infile)
sys.exit(0)
global FWDPRIMER, FADAPTER, REVPRIMER, RADAPTER, QUALITY, R_1, R_2, INTERMFASTQ, INTERMINFO, TRIMFASTQF
FWDPRIMER='ATCTACACTATGGTAATTGTGAACCWGCGGARGGATCA'
FADAPTER='AATGATACGGCGACCACCGAG'
REVPRIMER='GCATCGATGAAGAACGCAGCGGCTGACTGACT'
RADAPTER='ATCTCGTATGCCGTCTTCTGC'
QUALITY=args.quality
R_1 = '_L001_R1_001'
R_2 = '_L001_R2_001'
INTERMFASTQ = args.intermedfastq
INTERMINFO = args.intermedinfo
trimfastqd = args.trimfastq # dir
TRIMFASTQF = trimfastqd + 'trim_' # dir + prefix for files
for dir in [INTERMFASTQ, INTERMINFO, trimfastqd]:
if not os.path.exists(dir):
os.makedirs(dir)
lines = read_file(infile)
#GRL4463_S49_L001_R1_001
SAMPLE_BARCODE_DICT = dict()
for l in lines:
if '#' in l:
continue
cont = l.strip().split(delim)
sample = cont[0] + '_S'
barcode = cont[1]
if cont[0] in SAMPLE_BARCODE_DICT:
sys.exit('\nduplicate samples/barcodes detected, check sample sheet\n')
else:
SAMPLE_BARCODE_DICT[sample] = barcode
# 5' ITS1FI2 forward primer 3'
# adapter fwd-primer
#AATGATACGGCGACCACCGAG ATCTACACTATGGTAATTGTGAACCWGCGGARGGATCA
#5' adapter barcode rev-primer 3'
#CAAGCAGAAGACGGCATACGAGAT GATTCCGGCTCA AGTCAGTCAGCCGCTGCGTTCTTCATCGATGC
# reverse-complement
#5' rev-primer barcode adapter 3'
#GCATCGATGAAGAACGCAGCGGCTGACTGACT TGAGCCGGAATC ATCTCGTATGCCGTCTTCTGCTTG
# TGAGCCGGAATC
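    # Illustrative note: for a hypothetical sample-sheet barcode BC, the
    # composite search patterns counted below are assembled as
    #   bc_rad    = BC + RADAPTER              (barcode followed by adapter)
    #   rp_bc_rad = REVPRIMER + BC + RADAPTER  (rev-primer, barcode, adapter)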
outfile = open(outpfile, 'w')
headers_list = ['file' , 'starts_bc', 'ends_bc', 'starts_rp', 'ends_rp', 'starts_ad', 'ends_ad', \
'starts_bc_rad' , 'ends_bc_rad' , 'starts_bc_ends_bc_rad', 'starts_bc_rad_ends_bc_rad',\
'starts_rp_bc_rad' , 'ends_rp_bc_rad' , 'starts_bc_ends_rp_bc_rad', 'starts_bc_rad_ends_rp_bc_rad',\
'starts_rp_bc_rad_ends_rp_bc_rad', 'neg_starts_bc_ends_rp_bc_rad', 'neg_starts_bc_rad_ends_rp_bc_rad',\
'starts_bc_neg_ends_rp_bc_rad', 'starts_bc_rad_neg_ends_rp_bc_rad', 'neg_starts_bc_rad_neg_ends_rp_bc_rad',\
'starts_fp', 'ends_fp', 'contains_fp','starts_fad', 'ends_fad', 'contains_fad',\
'starts_fad_fp', 'ends_fad_fp','contains_fad_fp']
outfile.write ("%s\n" % delim.join(headers_list))
for f in sorted(os.listdir('./')):
if os.path.isfile(f) and f.endswith(filetype):
for k in SAMPLE_BARCODE_DICT: # for each element in dict
#if k in f: # if the key is prefix in filename
if k in f and R_2 in f: # if the key is prefix in filename and only for read 2 --------------- A (uncomment for read 2, comment the line above)
barcode = SAMPLE_BARCODE_DICT[k]
counts_list = [f]
counts_list.extend(grep_string(barcode, f))
counts_list.extend(grep_string(REVPRIMER, f))
counts_list.extend(grep_string(RADAPTER, f))
bc_rad = barcode+RADAPTER
counts_list.extend(grep_string(bc_rad, f))
counts_list.append(grep_two_patterns(barcode, bc_rad, f))
counts_list.append(grep_two_patterns(bc_rad, bc_rad, f))
rp_bc_rad = REVPRIMER+barcode+RADAPTER
counts_list.extend(grep_string(rp_bc_rad, f))
counts_list.append(grep_two_patterns(barcode, rp_bc_rad, f))
counts_list.append(grep_two_patterns(bc_rad, rp_bc_rad, f))
counts_list.append(grep_two_patterns(rp_bc_rad, rp_bc_rad, f))
counts_list.append(grep_two_patterns_negate_first(barcode, rp_bc_rad, f))
counts_list.append(grep_two_patterns_negate_first(bc_rad, rp_bc_rad, f))
counts_list.append(grep_two_patterns_negate_second(barcode, rp_bc_rad, f))
counts_list.append(grep_two_patterns_negate_second(bc_rad, rp_bc_rad, f))
counts_list.append(grep_two_patterns_negate_both(bc_rad, rp_bc_rad, f))
counts_list.extend(grep_string(FWDPRIMER, f))
counts_list.append(grep_string_contains(FWDPRIMER, f))
counts_list.extend(grep_string(FADAPTER, f))
counts_list.append(grep_string_contains(FADAPTER, f))
fad_fp = FADAPTER+barcode
counts_list.extend(grep_string(fad_fp, f))
counts_list.append(grep_string_contains(fad_fp, f))
str_counts_list = [str(i.strip()) for i in counts_list]
#print '\t'.join(str_counts_list)
outfile.write ("%s\n" % delim.join(str_counts_list))
#sys.exit(0)
outfile.close()
completed_files_list = list()
outfile = open(logfile, 'w')
for f in sorted(os.listdir('./')):
if os.path.isfile(f) and f.endswith(filetype):
for k in SAMPLE_BARCODE_DICT: # for each element in dict
if k in f and f not in completed_files_list: # if the key is prefix in filename
#put the read 1 and read 2 file into completed list
#go ahead and do the trimming
barcode = SAMPLE_BARCODE_DICT[k]
read1 = read2 = ''
if R_1 in f:
read1 = f
read2 = f.replace(R_1, R_2)
elif R_2 in f:
read2 = f
read1 = f.replace(R_2, R_1)
completed_files_list.extend([read1, read2])
# trim, qf
output_dump = remove_adap_bc_primer_quality_trim_min_length (read1, read2, barcode)
outfile.write ("%s\n---- Sample ----\n" % ('\n'.join(output_dump)))
outfile.close()
def remove_adap_bc_primer_quality_trim_min_length (read1, read2, barcode):
# remove bc_rad from the front using -g and ^,
# rp_bc_rad from back using -b to account for partial seqs like pterSEQUENCE(of read1)
global FWDPRIMER, FADAPTER, REVPRIMER, RADAPTER, QUALITY, R_1, R_2
bc_rad = barcode + RADAPTER
rp_bc_rad = REVPRIMER + barcode + RADAPTER
fad_fp = FADAPTER + FWDPRIMER
# remove adapter then do Quality trimming (and min length) separately
# doing the above together causes quality trimm to be first
# if the adapter, bc, rp bases are low quality, they might get trimmed
# and then the trimming of adapter,bc,rp may be unsuccessful/confusing.
# although in this case, following is needed only for read1,
# I am going to do to both just to be safe and for use of future datasets
# although not needed, i am using the -a name=sequence format so i can use
# the name for the adapter column in the info file
output_dump = list()
read_file = read_file_base = ''
#for read in [read1,read2]:
for read in [read2]: #--------------- B (uncomment for read 2, comment the line above)
output_dump.append('--- Read ---')
adap_name = 'bc_rad__'
param = '-g %s=^%s' %(adap_name, bc_rad) # --front, ^ enforces search at start of string
output, infile_base, infile = trim_adap(adap_name, bc_rad, param, read, read)
#cmd = 'cutadapt -g %s=^%s -o %s -e 0.05 -O %s --info-file=%s %s' %(adap_name, bc_rad, tmp, overlap, infofile, read)
output_dump.append(output)
adap_name = 'rp_bc_rad__'
param = '-b %s=%s' %(adap_name, rp_bc_rad) # --anywhere
output, infile_base, infile = trim_adap(adap_name, rp_bc_rad, param, infile_base, infile)
#cmd = 'cutadapt -b %s=%s -o %s -e 0.05 -O %s --info-file=%s %s' %(adap_name, rp_bc_rad, tmp2, overlap, infofile, tmp)
output_dump.append(output)
adap_name = 'fad_fp__'
#param = '-b %s=%s' %(adap_name, fad_fp) # --anywhere
param = '-q %d --minimum-length 100 -b %s=%s' %(QUALITY, adap_name, fad_fp) #--------------- C (uncomment for read 2, comment the line above)
output, infile_base, infile = trim_adap(adap_name, fad_fp, param, infile_base, infile)
#cmd = 'cutadapt -b %s=%s -o %s -e 0.05 -O %s --info-file=%s %s' %(adap_name, fad_fp, tmp3, overlap, infofile, tmp2)
output_dump.append(output)
read_file = infile
read_file_base = infile_base
#output1, output2 = min_length_filter_paired(read_file, read_file_base, QUALITY, R_1, R_2) #--------------- D (comment for read 2)
#output_dump.extend([output1, output2]) #--------------- E (comment for read 2)
return output_dump
def trim_adap(adap_name, adap, param, infile_base, infile):
global INTERMFASTQ, INTERMINFO
ofilename_base = adap_name + infile_base
ofilename = INTERMFASTQ + ofilename_base
infofile = INTERMINFO + ofilename_base + '_info.txt'
overlap = len(adap) - 10
cmd = 'cutadapt %s -o %s -e 0.05 -O %s --info-file=%s %s' %(param, ofilename, overlap, infofile, infile)
output = Popen(cmd, shell=True, stdout=PIPE).communicate()[0]
#print cmd, '\n', output
return output, ofilename_base, ofilename
def min_length_filter_paired(read_file, read_file_base, QUALITY, R_1, R_2):
global TRIMFASTQF
trimmed2 = TRIMFASTQF + read_file_base # read file 2
trimmed1 = trimmed2.replace(R_2, R_1)
read2file = read_file
read1file = read_file.replace(R_2, R_1)
cmd='cutadapt -q %d --minimum-length 100 --paired-output tmp.2.fastq -o tmp.1.fastq %s %s' %(QUALITY, read1file, read2file)
output1 = Popen(cmd, shell=True, stdout=PIPE).communicate()[0]
#print cmd, '\n', output1
cmd='cutadapt -q %d --minimum-length 100 --paired-output %s -o %s tmp.2.fastq tmp.1.fastq' %(QUALITY, trimmed1, trimmed2)
output2 = Popen(cmd, shell=True, stdout=PIPE).communicate()[0]
#print cmd, '\n', output2
cmd='rm tmp.1.fastq tmp.2.fastq'
output = Popen(cmd, shell=True, stdout=PIPE).communicate()[0]
return output1, output2
'''
#First trim the forward read, writing output to temporary files (we also add some quality trimming):
cutadapt -q 10 --minimum-length 20 --paired-output tmp.2.fastq -o tmp.1.fastq reads.1.fastq reads.2.fastq
#Then trim the reverse read, using the temporary files as input:
cutadapt -q 15 --minimum-length 20 --paired-output trimmed.1.fastq -o trimmed.2.fastq tmp.2.fastq tmp.1.fastq
#Remove the temporary files:
rm tmp.1.fastq tmp.2.fastq
#quality threshold (30)
#overlap (length of pattern - 10)
#keep both trimmed and untrimmed reads (trim but do not discard)
#min length (100)
#output
#info file
#paired output
######## I want to avoid cases where it is trimming seqs of length 3 or which can be random
(high expect.), so use the --overlap criteria.
No. of allowed errors:
0-9 bp: 0; 10-19 bp: 1; 20-29 bp: 2; 30-39 bp: 3; 40-49 bp: 4; 50-59 bp: 5; 60-65 bp: 6
Overview of removed sequences (5')
length count expect max.err error counts
3 22 97.3 0 22
4 9 24.3 0 9
Overview of removed sequences (3' or within)
length count expect max.err error counts
3 48 97.3 0 48
4 21 24.3 0 21
5 14 6.1 0 14
###### Also some seqs are probably in the middle and being treated as 3' causing almost the entire read
to be trimmed, so either use different parameter (-a or -g)
69 5 0.0 6 2 1 0 1 0 0 1
70 59 0.0 6 17 17 8 1 6 4 6
71 44 0.0 6 13 10 6 4 5 1 5
72 223 0.0 6 61 46 33 15 23 23 22
73 90 0.0 6 16 17 12 10 13 13 9
74 5 0.0 6 0 1 2 0 2
75 7 0.0 6 2 3 0 1 1
76 7 0.0 6 1 2 1 1 0 1 1
77 9 0.0 6 4 1 0 1 0 2 1
78 4 0.0 6 0 1 0 1 1 0 1
79 1 0.0 6 0 0 1
80 6 0.0 6 0 2 0 0 2 1 1
81 3 0.0 6 0 1 0 0 1 1
82 5 0.0 6 1 2 0 0 0 0 2
83 8 0.0 6 1 2 3 0 0 1 1
84 3 0.0 6 0 0 0 1 1 0 1
85 2 0.0 6 0 1 0 0 0 0 1
88 1 0.0 6 1
93 1 0.0 6 0 1
94 1 0.0 6 0 1
100 1 0.0 6 0 1
103 1 0.0 6 0 1
106 2 0.0 6 1 0 0 0 1
108 1 0.0 6 1
110 1 0.0 6 0 1
111 1 0.0 6 0 1
115 1 0.0 6 0 1
121 1 0.0 6 1
122 1 0.0 6 1
125 1 0.0 6 1
133 2 0.0 6 1 0 0 0 0 0 1
147 1 0.0 6 1
152 1 0.0 6 1
157 1 0.0 6 1
158 1 0.0 6 1
182 1 0.0 6 1
'''
if __name__=='__main__':
datetime = strftime("%a, %d %b %Y %I:%M:%S %p", localtime())
cmd = 'echo ' + datetime
os.system(cmd)
main(sys.argv)
|
gpl-3.0
|
yunque/sms-tools
|
lectures/06-Harmonic-model/plots-code/spectral-peaks-and-f0.py
|
22
|
1040
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512*2
M = 511
t = -60
w = np.hamming(M)
start = .8*fs
hN = N/2
hM = (M+1)/2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
pmag = mX[ploc]
freqaxis = fs*np.arange(mX.size)/float(N)
plt.figure(1, figsize=(9, 5))
plt.plot(freqaxis,mX,'r', lw=1.5)
plt.axis([0,7000,-80,max(mX)+1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
harms = np.arange(1,20)*440.0
plt.vlines(harms, -80, max(mX)+1, color='g', lw=1.5)
plt.title('mX + peaks + f0 multiples (oboe-A4.wav)')
plt.tight_layout()
plt.savefig('spectral-peaks-and-f0.png')
plt.show()
|
agpl-3.0
|
cuilishen/cuilishenMissionPlanner
|
Lib/site-packages/scipy/misc/common.py
|
53
|
10116
|
"""
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from numpy import exp, asarray, arange, newaxis, hstack, product, array, \
where, zeros, extract, place, pi, sqrt, eye, poly1d, dot, r_
__all__ = ['factorial','factorial2','factorialk','comb',
'central_diff_weights', 'derivative', 'pade', 'lena']
# XXX: the factorial functions could move to scipy.special, and the others
# to numpy perhaps?
def factorial(n,exact=0):
"""
The factorial function, n! = special.gamma(n+1).
If exact is 0, then floating point precision is used, otherwise
exact long integer is computed.
- Array argument accepted only for exact=0 case.
- If n<0, the return value is 0.
Parameters
----------
n : int or array_like of ints
Calculate ``n!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above. If `exact` is set to True, calculate the
answer exactly using integer arithmetic. Default is False.
Returns
-------
nf : float or int
Factorial of `n`, as an integer or a float depending on `exact`.
Examples
--------
>>> arr = np.array([3,4,5])
>>> sc.factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> sc.factorial(5, exact=True)
120L
"""
if exact:
if n < 0:
return 0L
val = 1L
for k in xrange(1,n+1):
val *= k
return val
else:
from scipy import special
n = asarray(n)
sv = special.errprint(0)
vals = special.gamma(n+1)
sv = special.errprint(sv)
return where(n>=0,vals,0)
def factorial2(n, exact=False):
"""
Double factorial.
This is the factorial with every second value skipped, i.e.,
``7!! = 7 * 5 * 3 * 1``. It can be approximated numerically as::
        n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)    n odd
            = 2**(n/2) * (n/2)!                             n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0L
if n <= 0:
return 1L
val = 1L
for k in xrange(n,0,-2):
val *= k
return val
else:
from scipy import special
n = asarray(n)
vals = zeros(n.shape,'d')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1,n)
evenn = extract(cond2,n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals,cond1,special.gamma(nd2o+1)/sqrt(pi)*pow(2.0,nd2o+0.5))
place(vals,cond2,special.gamma(nd2e+1) * pow(2.0,nd2e))
return vals
def factorialk(n,k,exact=1):
"""
n(!!...!) = multifactorial of order k
k times
Parameters
----------
n : int, array_like
Calculate multifactorial. Arrays are only supported with exact
set to False. If n < 0, the return value is 0.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multi factorial of n.
Raises
------
NotImplementedError
Raises when exact is False
Examples
--------
>>> sc.factorialk(5, 1, exact=True)
120L
>>> sc.factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0L
if n<=0:
return 1L
val = 1L
for j in xrange(n,0,-k):
val = val*j
return val
else:
raise NotImplementedError
def comb(N,k,exact=0):
"""
The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, array
Number of things.
k : int, array
Number of elements taken.
exact : int, optional
If exact is 0, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, array
The total number of combinations.
Notes
-----
- Array arguments accepted only for exact=0 case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> sc.comb(n, k, exact=False)
array([ 120., 210.])
>>> sc.comb(10, 3, exact=True)
120L
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0L
val = 1L
for j in xrange(min(k, N-k)):
val = (val*(N-j))//(j+1)
return val
else:
from scipy import special
k,N = asarray(k), asarray(N)
lgam = special.gammaln
cond = (k <= N) & (N >= 0) & (k >= 0)
sv = special.errprint(0)
vals = exp(lgam(N+1) - lgam(N-k+1) - lgam(k+1))
sv = special.errprint(sv)
return where(cond, vals, 0.0)
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative of order ndiv
assuming equally-spaced function points.
If weights are in the vector w, then
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
Notes
-----
Can be inaccurate for large number of points.
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
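# Illustrative note (not part of the SciPy source): the familiar
# central-difference stencils fall out directly, e.g.
#     >>> central_diff_weights(3, 1)
#     array([-0.5,  0. ,  0.5])
# and the 5-point first-derivative weights are [1/12, -2/3, 0, 2/3, -1/12].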
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at point x0.
Given a function, use a central difference formula with spacing `dx` to
compute the n-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which nth derivative is found.
dx : int, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
Decreasing the step size too small can result in round-off error.
Examples
--------
>>> def x2(x):
... return x*x
...
>>> derivative(x2, 2)
4.0
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n==1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n==2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
def pade(an, m):
"""Given Taylor series coefficients in an, return a Pade approximation to
the function as the ratio of two polynomials p / q where the order of q is m.
"""
from scipy import linalg
an = asarray(an)
N = len(an) - 1
n = N-m
if (n < 0):
raise ValueError("Order of q <m> must be smaller than len(an)-1.")
Akj = eye(N+1,n+1)
Bkj = zeros((N+1,m),'d')
for row in range(1,m+1):
Bkj[row,:row] = -(an[:row])[::-1]
for row in range(m+1,N+1):
Bkj[row,:] = -(an[row-m:row])[::-1]
C = hstack((Akj,Bkj))
pq = dot(linalg.inv(C),an)
p = pq[:n+1]
q = r_[1.0,pq[n+1:]]
return poly1d(p[::-1]), poly1d(q[::-1])
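# Illustrative note (not part of the SciPy source): the [2/2] Pade approximant
# of exp(x), built from its Taylor coefficients 1, 1, 1/2, 1/6, 1/24, is
# (1 + x/2 + x**2/12) / (1 - x/2 + x**2/12):
#     >>> p, q = pade([1., 1., 1./2, 1./6, 1./24], 2)
#     >>> p(1.0) / q(1.0)   # ~2.714, close to e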
def lena():
"""
Get classic image processing example image, Lena, at 8-bit grayscale
bit-depth, 512 x 512 size.
Parameters
----------
None
Returns
-------
lena : ndarray
Lena image
Examples
--------
>>> import scipy.misc
>>> lena = scipy.misc.lena()
>>> lena.shape
(512, 512)
>>> lena.max()
245
>>> lena.dtype
dtype('int32')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(lena)
>>> plt.show()
"""
import cPickle, os
fname = os.path.join(os.path.dirname(__file__),'lena.dat')
f = open(fname,'rb')
lena = array(cPickle.load(f))
f.close()
return lena
|
gpl-3.0
|
nimagh/CNN_Implementations
|
common/tools_train.py
|
1
|
4075
|
from tensorflow.examples.tutorials.mnist import input_data
import custom_input_data
import matplotlib.pyplot as plt
from tools_general import np, tf
import scipy.misc
def get_train_params(data_dir, batch_size, epochs=20, test_in_each_epoch=1,one_hot=False, networktype='GAN_MNIST'):
if 'img2img' in networktype:
data_dir = data_dir + '/' + networktype.replace('_A2B','').replace('_B2A','')
data = custom_input_data.load_dataset(data_dir, networktype=networktype)
else:
data = input_data.read_data_sets(data_dir + '/' + networktype, one_hot=one_hot, reshape=False)
train_num = data.train.num_examples # total number of training images
test_num = data.test.num_examples # total number of validation images
print('Trainset size:', train_num, 'Testset_size:', test_num)
max_iter = int(np.ceil(epochs * train_num / batch_size))
test_iter = int(np.ceil(test_num / batch_size))
    test_interval = int(train_num / (test_in_each_epoch * batch_size))  # test `test_in_each_epoch` times per epoch
disp_interval = int(test_interval * 2)
if disp_interval == 0: disp_interval = 1
# snapshot_interval = test_interval * 5 # save at every epoch
return data, max_iter, test_iter, test_interval, disp_interval
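# Worked example with hypothetical numbers: for 55000 training images, 10000
# test images, batch_size=128, epochs=20 and test_in_each_epoch=1:
#   max_iter      = ceil(20 * 55000 / 128) = 8594
#   test_iter     = ceil(10000 / 128)      = 79
#   test_interval = int(55000 / (1 * 128)) = 429
#   disp_interval = 2 * 429                = 858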
def OneHot(X, n=10):
return np.eye(n)[np.array(X).reshape(-1)].astype(np.float32)
def vis_square(X, nh_nw, save_path=None):
h, w = X.shape[1], X.shape[2]
img = np.zeros((h * nh_nw[0], w * nh_nw[1], 3))
for n, x in enumerate(X):
j = n // nh_nw[1]
i = n % nh_nw[1]
img[j * h:j * h + h, i * w:i * w + w, :] = x
if save_path:
scipy.misc.imsave(save_path, img)
return save_path
else:
return img
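# Illustrative note: vis_square tiles a batch of images into one grid row by
# row, e.g. X of shape (100, 28, 28, 3) with nh_nw=(10, 10) yields a single
# (280, 280, 3) image.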
def plot_latent_variable(data, labels):
if data.shape[1] != 2:
        from sklearn.decomposition import PCA  # import needed for the 2-D projection below
        pca = PCA(n_components=2)
data = pca.fit_transform(data)
print(pca.explained_variance_ratio_)
plt.figure(figsize=(8, 8))
plt.axes().set_aspect('equal')
color = plt.cm.rainbow(np.linspace(0, 1, 10))
for l, c in enumerate(color):
idxs = np.where(labels==l)
plt.scatter(data[idxs, 0], data[idxs, 1], c=c, label=l, linewidth=0, s=8)
plt.legend()
plt.show()
def demo_latent_variable(Xrec, Z_mu, labels, save_path):
num_colors = ['C0.','C1.','C2.','C3.','C4.','C5.','C6.','C7.','C8.','C9.']
fig = plt.figure(figsize=(10,5))
#fig.suptitle('Iter. #%d, Test_loss = %1.5f'%(it,best_test_loss))
likelihood = np.zeros([100, 28, 28, 1])
ax1 = fig.add_subplot(121)
for num in range(10):
ax1.plot(Z_mu[np.where(labels==num)[0],0],Z_mu[np.where(labels==num)[0],1],num_colors[num], label='%d'%num)
likelihood[np.arange(0,100,10)+num] = Xrec[np.where(labels==num)[0][:10]]
#print(np.arange(0,100,10)+num)
ax1.legend(loc='upper right', bbox_to_anchor=(1.1, 1.05), ncol=1, fancybox=True, shadow=True)
ax1.set_xlabel('Latent Dimension #1');ax1.set_ylabel('Latent Dimension #2')
ax1.set_ylim([-7,7]);ax1.set_xlim([-7,7])
ax2 = fig.add_subplot(122)
ax2.imshow(vis_square(likelihood, [10, 10]), cmap='gray')
ax2.set_xticks([])
ax2.set_yticks([])
plt.savefig(save_path, dpi=300)
plt.close()
def count_model_params(variables=None):
    if variables is None:
        variables = tf.trainable_variables()
    total_parameters = 0
    for variable in variables:
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
return(total_parameters)
def get_demo_data(data, spl=50):
all_test_set, all_labels = data.test.next_batch(data.test.num_examples)
Xdemo = np.zeros([spl*10, 28,28,1])
Xdemo_labels = np.zeros([spl*10, 1])
for num in range(10):
Xdemo[spl*num:spl*num+spl,:] = all_test_set[np.where(all_labels==num)[0]][0:spl]
Xdemo_labels[spl*num:spl*num+spl,:] = num
Xdemo_labels_OH = OneHot(Xdemo_labels.astype(np.int32))
return Xdemo, Xdemo_labels
|
gpl-3.0
|
CallumNeeson/CP365
|
MovieCluster/movieCluster.py
|
1
|
5724
|
import matplotlib.pyplot as plt
import math
import numpy as np
from random import randint
np.random.seed(42)
class Cluster:
def __init__(self, centroid, clusterDic):
self.centroid = centroid
self.clusterDic = clusterDic
##adds new value to cluster
def appendToClusterDic(self, movieKey, movieVal):
self.clusterDic[movieKey] = movieVal
##recalculates centroid by averaging each user's rating for each movie in the current cluster.
def recalculateCentroid(self):
updatedCentroid = {}
for user, rating in self.centroid.iteritems():
cost = 0
validUsers = 0;
for movieID, movieDic in self.clusterDic.iteritems():
if user in movieDic:
cost += movieDic[user]
validUsers += 1
if cost != 0: updatedRating = cost/validUsers
else: updatedRating = 0
updatedCentroid[user] = updatedRating
return updatedCentroid
##def printCluster(self):
class ClusterModel:
def __init__(self, k, mainDic, userID, centroidsArr, clusterArr):
self.k = k ##number of clusters
self.mainDic = mainDic ##dictionary of all movies
self.userID = userID
self.centroidsArr = centroidsArr ##array of centroid dictionaries
self.clusterArr = clusterArr ##array of cluster objects
##calculates random centroids for initial run
def makeRandomCentroid(self):
for i in range(self.k):
i = {}
for j in range(len(self.userID)):
                i[self.userID[j]] = randint( 0, 5 )
self.centroidsArr.append(i)
##passes in the centroid to clusters that have no movies added yet
def makeNewClusters(self):
for i in range(self.k):
emptyDict = {}
newCluster = Cluster(self.centroidsArr[i], emptyDict)
self.clusterArr.append(newCluster)
##goes through mainDic and appends each movie to the cluster with the
## centroid most alike its ratings
def evaluateClusters(self):
for key, value in self.mainDic.iteritems():
bestCluster = self.clusterArr[0]
bestCost = 10000
for cluster in self.clusterArr:
currVal = self.movieDistance( cluster.centroid, value )
if( currVal < bestCost ):
bestCost = currVal
bestCluster = cluster
bestCluster.appendToClusterDic( key, value )
##computes distance between 2 movie dictionaries by seeing difference in each users rating
##skips 0 values in both
def movieDistance(self, movie1, movie2 ):
cost = 0
for key, value in movie1.iteritems():
if key in movie2:
cost += (movie1[key] - movie2[key]) ** 2
return cost
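    ## Illustrative note: for movie1 = {1: 5, 2: 3} and movie2 = {1: 4, 3: 2},
    ## only user 1 is shared, so the distance is (5 - 4) ** 2 = 1.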
def calculateClusterError(self, cluster):
error = 0
for key, value in cluster.clusterDic.iteritems():
error += self.movieDistance(value, cluster.centroid)
return error
def calculateAllError(self):
totalerror = 0
for cluster in self.clusterArr:
totalerror += self.calculateClusterError(cluster)
print "total Error:"
print totalerror
def printModel(self, i):
for cluster in self.clusterArr:
val = "Movie Cluster: "
for key, value in cluster.clusterDic.iteritems():
val += str(key) + ", "
print "Error: " + str(self.calculateClusterError(cluster))
print "Epoch #: " + str(i)
##print "Centroid: " + str(cluster.centroid)
print val
##makes random centroids, inputs them into clusters, and then appends
##each movie to the cluster with the centroid closest to its values
def initialize(self):
self.makeRandomCentroid()
self.makeNewClusters()
self.evaluateClusters()
##Recalculates each centroid and recomputes the clusters accordingly
def train(self, epochs):
for i in range(epochs):
newCentroidsArr = []
for j in range(len(self.clusterArr)):
newCentroidsArr.append(self.clusterArr[j].recalculateCentroid())
self.centroidsArr = newCentroidsArr
self.clusterArr = []
self.makeNewClusters()
self.evaluateClusters()
self.calculateAllError()
##self.printModel(i)
#returns 3 NP arrays that correspond to the UserID, MovieID, and Rating
def loadDataset(filename="u.data"):
my_data = np.genfromtxt(filename, skip_header=0)
userID = my_data[:, 0]
movieID = my_data[:, 1]
rating = my_data[:, 2]
return userID, movieID, rating
##makes dataset 'mainDic' which is the main dictionary that has a movie as a key, and another dictionary as
##its value which stores each user and their rating for that movie.
def makeDataSet(userID, movieID, rating):
mainDic = {}
for i in range(len(movieID)):
if movieID[i] in mainDic:
mainDic[movieID[i]].update({userID[i]: rating[i]})
else:
mainDic[movieID[i]] = {userID[i]: rating[i]}
return mainDic
if __name__=="__main__":
userID, movieID, rating = loadDataset()
mainDic = makeDataSet( userID, movieID, rating )
emptyCentroidsArr = []
emptyClusterArr = []
##creates the model
movieRatingsModel = ClusterModel(8, mainDic, userID, emptyCentroidsArr, emptyClusterArr)
##initializes the model by creating random centroids and clustering the movies accordingly
movieRatingsModel.initialize()
##Clusters the set based on number of epochs
movieRatingsModel.train(100)
|
gpl-3.0
|
hpparvi/PyTransit
|
pytransit/contamination/plotting.py
|
1
|
5964
|
# PyTransit: fast and easy exoplanet transit modelling in Python.
# Copyright (C) 2010-2020 Hannu Parviainen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import matplotlib as mpl
import matplotlib.pyplot as pl
import seaborn as sb
from matplotlib.gridspec import GridSpec
from numpy import linspace, median, argmin, percentile
color = sb.color_palette()[0]
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [sb.utils.set_hls_values(color_rgb, l=l) for l in linspace(1, 0, 12)]
cmap = sb.blend_palette(colors, as_cmap=True)
color = sb.color_palette()[1]
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [sb.utils.set_hls_values(color_rgb, l=l) for l in linspace(1, 0, 12)]
cmap2 = sb.blend_palette(colors, as_cmap=True)
## Color definitions
## -----------------
c_ob = "#002147" # Oxford blue
c_bo = "#CC5500" # Burnt orange
def plot_kdist(samples, side: str = 'right', ax=None, clip: tuple = (0, 1), percentiles: tuple = (16, 84),
offset: float = 0.02, bw=0.005, gridsize: int = 200):
assert side in ('left', 'right')
sign = 1 if side == 'right' else -1
fig, ax = (None, ax) if ax is not None else pl.subplots()
p = sb.kdeplot(samples, kernel='cos', bw=bw, gridsize=gridsize, cut=0, clip=clip, vertical=True, ax=ax, color='k',
legend=False)
xd, yd = p.lines[-1].get_xdata(), p.lines[-1].get_ydata()
m = median(samples)
my = xd[argmin(abs(yd - m))] / xd.max()
p.lines[-1].set_xdata(sign * (offset + xd / xd.max()))
ax.plot((sign * offset, sign * offset), clip, 'k')
ax.plot((sign * offset, sign * my), (m, m), 'k')
p = percentile(samples, percentiles)
mask = (yd > p[0]) & (yd < p[1])
ax.fill_betweenx(yd[mask], sign * (offset + xd[mask] / xd.max()), sign*offset, alpha=0.25)
return fig
def plot_two_sided_kde(left, right, clip: tuple = (0, 1), percentiles: tuple = (16, 84),
offset: float = 0.02, bw=0.005, gridsize: int = 200, ax = None):
fig, ax = (None, ax) if ax is not None else pl.subplots()
plot_kdist(left, side='left', clip=clip, percentiles=percentiles, offset=offset, bw=bw, gridsize=gridsize, ax=ax)
plot_kdist(right, side='right', clip=clip, percentiles=percentiles, offset=offset, bw=bw, gridsize=gridsize, ax=ax)
pl.setp(ax, xlim=(-1.1, 1.1))
return fig
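# Hypothetical usage sketch (the sample arrays below are illustrative, not part of this module):
#
#   import numpy as np
#   left_samples = np.random.beta(2, 5, size=5000)   # e.g. posterior samples for one case
#   right_samples = np.random.beta(5, 2, size=5000)  # e.g. posterior samples for another case
#   fig = plot_two_sided_kde(left_samples, right_samples, clip=(0, 1))
#   fig.savefig('two_sided_kde.png')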
def _jplot(hte, cte, cnr, imp, rho, fw=10, nb=30, gs=25, simulation=False, **kwargs):
htelim = kwargs.get('htelim', (2000, 8000))
ctelim = kwargs.get('ctelim', (4000, 12000))
blim = kwargs.get('blim', (0, 1))
rlim = kwargs.get('rlim', (0, 15))
clim = kwargs.get('clim', (0, 1))
fig = pl.figure(figsize=(fw, fw / 4))
gs_tt = GridSpec(2, 1, bottom=0.2, top=1, left=0.1, right=0.3, hspace=0, wspace=0, height_ratios=[0.15, 0.85])
gs_ct = GridSpec(2, 5, bottom=0.2, top=1, left=0.37, right=1, hspace=0.05, wspace=0.05,
height_ratios=[0.15, 0.85],
width_ratios=[1, 1, 1, 1, 0.2])
ax_tt = pl.subplot(gs_tt[1, 0])
ax_chj = pl.subplot(gs_ct[1, 0])
ax_ccj = pl.subplot(gs_ct[1, 1])
ax_cbj = pl.subplot(gs_ct[1, 2])
ax_crj = pl.subplot(gs_ct[1, 3])
ax_thm = pl.subplot(gs_ct[0, 0])
ax_ctm = pl.subplot(gs_ct[0, 1])
ax_bm = pl.subplot(gs_ct[0, 2])
ax_rm = pl.subplot(gs_ct[0, 3])
ax_cnm = pl.subplot(gs_ct[1, 4])
ax_tt.hexbin(hte, cte, gridsize=gs, cmap=cmap, extent=(htelim[0], htelim[1], ctelim[0], ctelim[1]))
ax_chj.hexbin(hte, cnr, gridsize=gs, cmap=cmap, extent=(htelim[0], htelim[1], clim[0], clim[1]))
ax_ccj.hexbin(cte, cnr, gridsize=gs, cmap=cmap, extent=(ctelim[0], ctelim[1], clim[0], clim[1]))
ax_cbj.hexbin(imp, cnr, gridsize=gs, cmap=cmap, extent=(blim[0], blim[1], clim[0], clim[1]))
ax_crj.hexbin(rho, cnr, gridsize=gs, cmap=cmap, extent=(rlim[0], rlim[1], clim[0], clim[1]))
ax_thm.hist(hte, bins=nb, alpha=0.5, range=htelim)
ax_ctm.hist(cte, bins=nb, alpha=0.5, range=ctelim)
ax_bm.hist(imp, bins=nb, alpha=0.5, range=blim)
ax_rm.hist(rho, bins=nb, alpha=0.5, range=rlim)
ax_cnm.hist(cnr, bins=nb, alpha=0.5, range=clim, orientation='horizontal')
pl.setp(ax_tt, xlabel='Host $T_\mathrm{Eff}$', ylabel='Contaminant $T_\mathrm{Eff}$')
pl.setp(ax_chj, xlabel='Host $T_\mathrm{Eff}$', ylabel='Contamination in $i\'$')
pl.setp(ax_ccj, xlabel='Contaminant $T_\mathrm{Eff}$')
pl.setp(ax_cbj, xlabel='Impact parameter')
pl.setp(ax_crj, xlabel='Stellar density')
pl.setp(ax_thm, xlim=ax_chj.get_xlim())
pl.setp(ax_ctm, xlim=ax_ccj.get_xlim())
pl.setp(ax_bm, xlim=ax_cbj.get_xlim())
pl.setp([ax_ccj, ax_cnm], ylim=ax_chj.get_ylim())
pl.setp([ax_chj, ax_ccj, ax_cbj, ax_crj, ax_cnm], ylim=clim)
pl.setp([ax_thm, ax_ctm, ax_cnm, ax_bm, ax_rm], yticks=[], xticks=[])
pl.setp(ax_ccj.get_yticklabels(), visible=False)
pl.setp(ax_cbj.get_yticklabels(), visible=False)
pl.setp(ax_crj.get_yticklabels(), visible=False)
[sb.despine(ax=ax, left=True, offset=0.1) for ax in [ax_thm, ax_ctm, ax_bm, ax_rm]]
[sb.despine(ax=ax) for ax in [ax_chj, ax_ccj, ax_cbj, ax_crj]]
sb.despine(ax=ax_cnm, bottom=True)
return fig, ax_tt, ax_chj, ax_cbj, ax_ccj, ax_crj
def joint_marginal_plot(df, fw=10, nb=30, gs=25, **kwargs):
return _jplot(df.teff_h, df.teff_c, df.cnt, df.b, df.rho, fw, nb, gs, **kwargs)[0]
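# Hypothetical usage sketch (column names follow the accessors used in joint_marginal_plot above;
# the DataFrame itself is illustrative, not part of this module):
#
#   import pandas as pd
#   from numpy.random import normal, uniform
#   df = pd.DataFrame({'teff_h': normal(5800, 150, 2000), 'teff_c': normal(6500, 400, 2000),
#                      'cnt': uniform(0, 1, 2000), 'b': uniform(0, 1, 2000),
#                      'rho': uniform(0.5, 3, 2000)})
#   fig = joint_marginal_plot(df)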
|
gpl-2.0
|
stefanosbou/trading-with-python
|
sandbox/spreadCalculations.py
|
78
|
1496
|
'''
Created on 28 okt 2011
@author: jev
'''
from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener
from tradingWithPython.lib import yahooFinance
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import os
symbols = ['SPY','IWM']
y = yahooFinance.HistData('temp.csv')
y.startDate = (2007,1,1)
df = y.loadSymbols(symbols,forceDownload=False)
#df = y.downloadData(symbols)
res = readBiggerScreener('CointPairs.csv')
#---check with spread scanner
#sp = DataFrame(index=symbols)
#
#sp['last'] = df.ix[-1,:]
#sp['targetCapital'] = Series({'SPY':100,'IWM':-100})
#sp['targetShares'] = sp['targetCapital']/sp['last']
#print sp
#The dollar-neutral ratio is about 1 * SPY - 1.7 * IWM, which should give a spread of zero (or very near zero)
#s = Spread(symbols, histClose = df)
#print s
#s.value.plot()
#print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns')
#print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log')
#print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard')
#p = Portfolio(df)
#p.setShares([1, -1.7])
#p.value.plot()
quote = yahooFinance.getQuote(symbols)
print(quote)
s = Spread(symbols,histClose=df, estimateBeta = False)
s.setLast(quote['last'])
s.setShares(Series({'SPY':1,'IWM':-1.7}))
print(s)
#s.value.plot()
#s.plot()
fig = plt.figure(2)
s.plot()
|
bsd-3-clause
|
mraue/pyfact
|
examples/create_dummy_rmf_from_arf.py
|
1
|
4676
|
#===========================================================================
# Copyright (c) 2011-2012, the PyFACT developers
# All rights reserved.
#
# LICENSE
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the PyFACT developers nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE PYFACT DEVELOPERS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===========================================================================
# Imports
import sys
import os
import logging
import numpy as np
import pyfits
import scipy.special
import scipy.interpolate
#import ROOT
import matplotlib.pyplot as plt
# Add script parent directory to python search path to get access to the pyfact package
sys.path.append(os.path.abspath(sys.path[0].rsplit('/', 1)[0]))
import pyfact as pf
#===========================================================================
# Functions
#===========================================================================
# Main
"""
DESCRIPTION MISSING
"""
#---------------------------------------------------------------------------
# Setup
# Setup fancy logging for output
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# Read input file from command line
arf, rmf = '', ''
if len(sys.argv) != 3 :
    logging.info('You need to specify the ARF input file and the output file name.')
sys.exit(0)
else :
arf = sys.argv[1]
rmf = sys.argv[2]
#---------------------------------------------------------------------------
# Open ARF
f = pyfits.open(arf)
ea, ea_erange = pf.arf_to_np(f[1])
nbins = len(ea)
instrument = f[1].header['INSTRUME']
telescope = f[1].header['TELESCOP']
#---------------------------------------------------------------------------
# Create RMF
#rm = np.zeros([nbins, nbins])
#for i in range(nbins) :
# rm[i][i] = 1.
sigma = .2
logerange = np.log10(ea_erange)
logemingrid = logerange[:-1] * np.ones([nbins, nbins])
logemaxgrid = logerange[1:] * np.ones([nbins, nbins])
logecentergrid = np.transpose(((logerange[:-1] + logerange[1:]) / 2.) * np.ones([nbins, nbins]))
#gauss = lambda p, x: p[0] / np.sqrt(2. * np.pi * p[2] ** 2.) * np.exp(- (x - p[1]) ** 2. / 2. / p[2] ** 2.)
gauss_int = lambda p, x_min, x_max: .5 * (scipy.special.erf((x_max - p[1]) / np.sqrt(2. * p[2] ** 2.)) - scipy.special.erf((x_min - p[1]) / np.sqrt(2. * p[2] ** 2.)))
rm = gauss_int([1., 10. ** logecentergrid, sigma * 10. ** logecentergrid], 10. ** logemingrid, 10. ** logemaxgrid)
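# The matrix element rm[i, j] is the probability that a photon with true energy
# E_i = 10**logecentergrid[i, j] (constant along each row) is reconstructed in the energy bin
# [10**logemingrid[i, j], 10**logemaxgrid[i, j]], given a Gaussian resolution of width sigma * E_i.
# Illustrative check (approximate, assuming energies in TeV): for E_i = 1 and sigma = 0.2, a bin
# spanning [0.8, 1.2] covers +/- 1 sigma, so gauss_int([1., 1., .2], 0.8, 1.2) ~ 0.68.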
logging.info('Sanity check, integrated rows should be 1.: {0}'.format(np.sum(rm, axis=1)))
# Create RM hdulist
hdulist = pf.np_to_rmf(rm, ea_erange, ea_erange, 1E-5,
telescope=telescope, instrument=instrument)
# Write RM to file
hdulist.writeto(rmf)
# DEBUG plots
#plt.subplot(221)
#plt.imshow(np.log10(rm[::-1]), interpolation='nearest')
#cb = plt.colorbar()
#plt.subplot(222)
#plt.imshow(logecentergrid, interpolation='nearest')
#cb = plt.colorbar()
#plt.subplot(223)
#plt.imshow(logemingrid, interpolation='nearest')
#cb = plt.colorbar()
#plt.subplot(224)
#plt.imshow(logemaxgrid, interpolation='nearest')
#cb = plt.colorbar()
#plt.show()
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
|
bsd-3-clause
|
russel1237/scikit-learn
|
sklearn/feature_selection/tests/test_rfe.py
|
209
|
11733
|
"""
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
    # All the noisy variables were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
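    # Worked example (for illustration): n_features=11, n_features_to_select=3, step=2 gives
    # formula1 = 1 + (11 + 2 - 3 - 1) // 2 = 5 and formula2 = 1 + ceil((11 - 3) / 2) = 5;
    # with step=3 both give 4, matching the two cases exercised below.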
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
|
bsd-3-clause
|
awanke/bokeh
|
bokeh/compat/mplexporter/renderers/vega_renderer.py
|
54
|
5284
|
import warnings
import json
import random
from .base import Renderer
from ..exporter import Exporter
class VegaRenderer(Renderer):
def open_figure(self, fig, props):
self.props = props
self.figwidth = int(props['figwidth'] * props['dpi'])
self.figheight = int(props['figheight'] * props['dpi'])
self.data = []
self.scales = []
self.axes = []
self.marks = []
def open_axes(self, ax, props):
if len(self.axes) > 0:
warnings.warn("multiple axes not yet supported")
self.axes = [dict(type="x", scale="x", ticks=10),
dict(type="y", scale="y", ticks=10)]
self.scales = [dict(name="x",
domain=props['xlim'],
type="linear",
range="width",
),
dict(name="y",
domain=props['ylim'],
type="linear",
range="height",
),]
def draw_line(self, data, coordinates, style, label, mplobj=None):
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({'name': dataname,
'values': [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append({'type': 'line',
'from': {'data': dataname},
'properties': {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"stroke": {"value": style['color']},
"strokeOpacity": {"value": style['alpha']},
"strokeWidth": {"value": style['linewidth']},
}
}
})
def draw_markers(self, data, coordinates, style, label, mplobj=None):
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({'name': dataname,
'values': [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append({'type': 'symbol',
'from': {'data': dataname},
'properties': {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"fill": {"value": style['facecolor']},
"fillOpacity": {"value": style['alpha']},
"stroke": {"value": style['edgecolor']},
"strokeOpacity": {"value": style['alpha']},
"strokeWidth": {"value": style['edgewidth']},
}
}
})
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
if text_type == 'xlabel':
self.axes[0]['title'] = text
elif text_type == 'ylabel':
self.axes[1]['title'] = text
class VegaHTML(object):
def __init__(self, renderer):
self.specification = dict(width=renderer.figwidth,
height=renderer.figheight,
data=renderer.data,
scales=renderer.scales,
axes=renderer.axes,
marks=renderer.marks)
def html(self):
"""Build the HTML representation for IPython."""
id = random.randint(0, 2 ** 16)
html = '<div id="vis%d"></div>' % id
html += '<script>\n'
html += VEGA_TEMPLATE % (json.dumps(self.specification), id)
html += '</script>\n'
return html
def _repr_html_(self):
return self.html()
def fig_to_vega(fig, notebook=False):
"""Convert a matplotlib figure to vega dictionary
if notebook=True, then return an object which will display in a notebook
otherwise, return an HTML string.
"""
renderer = VegaRenderer()
Exporter(renderer).run(fig)
vega_html = VegaHTML(renderer)
if notebook:
return vega_html
else:
return vega_html.html()
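# Hypothetical usage sketch (the figure below is illustrative, not part of this module):
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.plot([0, 1, 2], [3, 1, 2])
#   html = fig_to_vega(fig)                # plain HTML string
#   obj = fig_to_vega(fig, notebook=True)  # object with _repr_html_ for IPython display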
VEGA_TEMPLATE = """
( function() {
var _do_plot = function() {
if ( (typeof vg == 'undefined') && (typeof IPython != 'undefined')) {
$([IPython.events]).on("vega_loaded.vincent", _do_plot);
return;
}
vg.parse.spec(%s, function(chart) {
chart({el: "#vis%d"}).update();
});
};
_do_plot();
})();
"""
|
bsd-3-clause
|
russel1237/scikit-learn
|
benchmarks/bench_plot_parallel_pairwise.py
|
297
|
1247
|
# Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
|
bsd-3-clause
|
tswast/google-cloud-python
|
bigquery/noxfile.py
|
1
|
7026
|
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import shutil
import nox
LOCAL_DEPS = (os.path.join("..", "api_core[grpc]"), os.path.join("..", "core"))
BLACK_PATHS = ("docs", "google", "samples", "tests", "noxfile.py", "setup.py")
def default(session):
"""Default unit test session.
This is intended to be run **without** an interpreter set, so
that the current ``python`` (on the ``PATH``) or the version of
    Python corresponding to the ``nox`` binary on the ``PATH`` can
run the tests.
"""
# Install all test dependencies, then install local packages in-place.
session.install("mock", "pytest", "pytest-cov", "freezegun")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", os.path.join("..", "test_utils"))
coverage_fail_under = "--cov-fail-under=97"
# fastparquet is not included in .[all] because, in general, it's redundant
# with pyarrow. We still want to run some unit tests with fastparquet
# serialization, though.
dev_install = ".[all,fastparquet]"
# There is no pyarrow or fastparquet wheel for Python 3.8.
if session.python == "3.8":
# Since many tests are skipped due to missing dependencies, test
# coverage is much lower in Python 3.8. Remove once we can test with
# pyarrow.
coverage_fail_under = "--cov-fail-under=92"
dev_install = ".[pandas,tqdm]"
session.install("-e", dev_install)
# IPython does not support Python 2 after version 5.x
if session.python == "2.7":
session.install("ipython==5.5")
else:
session.install("ipython")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud.bigquery",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
coverage_fail_under,
os.path.join("tests", "unit"),
*session.posargs
)
@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=["2.7", "3.7"])
def system(session):
"""Run the system test suite."""
# Sanity check: Only run system tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable.")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install local packages in place.
session.install("mock", "pytest", "psutil")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", os.path.join("..", "storage"))
session.install("-e", os.path.join("..", "test_utils"))
session.install("-e", ".[all]")
# IPython does not support Python 2 after version 5.x
if session.python == "2.7":
session.install("ipython==5.5")
else:
session.install("ipython")
# Run py.test against the system tests.
session.run(
"py.test", "--quiet", os.path.join("tests", "system.py"), *session.posargs
)
@nox.session(python=["2.7", "3.7"])
def snippets(session):
"""Run the snippets test suite."""
# Sanity check: Only run snippets tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable.")
# Install all test dependencies, then install local packages in place.
session.install("mock", "pytest")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", os.path.join("..", "storage"))
session.install("-e", os.path.join("..", "test_utils"))
session.install("-e", ".[all]")
# Run py.test against the snippets tests.
session.run("py.test", os.path.join("docs", "snippets.py"), *session.posargs)
session.run("py.test", "samples", *session.posargs)
@nox.session(python="3.7")
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
@nox.session(python="3.7")
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("black", "flake8")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", ".")
session.run("flake8", os.path.join("google", "cloud", "bigquery"))
session.run("flake8", "tests")
session.run("flake8", os.path.join("docs", "samples"))
session.run("flake8", os.path.join("docs", "snippets.py"))
session.run("black", "--check", *BLACK_PATHS)
@nox.session(python="3.7")
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "Pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
@nox.session(python="3.6")
def blacken(session):
"""Run black.
Format code to uniform standard.
This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
That run uses an image that doesn't have 3.6 installed. Before updating this
check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
"""
session.install("black")
session.run("black", *BLACK_PATHS)
@nox.session(python="3.7")
def docs(session):
"""Build the docs."""
session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", os.path.join("..", "storage"))
session.install("-e", ".[all]")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
|
apache-2.0
|
krez13/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
160
|
6028
|
# Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
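# For the calls below, e.g. generate_toy_data(3, 10, (8, 8)), the returned shapes are
# Y: (10, 64), U: (10, 3), V: (3, 64) -- a rank-3 "wide" data matrix with additive noise.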
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
bsd-3-clause
|
NifTK/NiftyNet
|
tests/resampler_grid_warper_test.py
|
1
|
13970
|
from __future__ import absolute_import, print_function, division
import base64
import numpy as np
import tensorflow as tf
from niftynet.layer.grid_warper import AffineGridWarperLayer
from niftynet.layer.resampler import ResamplerLayer
from tests.niftynet_testcase import NiftyNetTestCase
test_case_2d_1 = {
'data': "+/b9/+3/377dpX+Mxp+Y/9nT/d/X6vfMuf+hX/hSY/1pvf/P9/z//+///+7z"
"//ve19noiHuXVjlVSCUpwpyH/9i/9+LDwuufS84yGOYKGOgYQspG2v7Q/uXg"
"07aonZBtS1NqWVRycl9zZEY86sSf/+u/7uezlNlvIdYPA/8AAP8AK+MfgMRd"
"f3JGVzYTdV0xW2d9Y2N7c2NuZEgz58CV/+S66OS1jdt2KOclAP8AAP8AFtkB"
"V6Ema1wjkmZDkmdFXGd5XltwdWFqdldF8c2r/+/V//7szP/JOs9AC+gNGvkS"
"P9YlrNp4fl41kVdDj1ZDYWN8ZFdzblFjfVpU/+/a//Hp/e718P/2v/+8bOdb"
"auVOtv6Q9fW/om9eiEg/oGFYXFR9e2GOdEttbkZO7tPI//v2//P/+/f47PjQ"
"3Pmn3fmi3eGm/+rRyZCHhEg9l19Oal2TbU6HeUp2lm17x7Wn5eXZ7e7w9evp"
"+OXH/+yz+uWs3b+b/9/N3a6ebj8lg1Y1ZFyNcFWIelB0fFde2Mu48fjm+f/7"
"+PPt9uLH/+m6/+W24cSk/+TNz62SUS0LeVYuYGGAa1x9dFRpdldS9OXO/P3r"
"8vb1//78//bg8OG28d6z/OjH/+nLwqWHbksrh2JFWmB6ZWB2aVVedl9R893F"
"//Hl//r/++/z//Xh/PDG9Oa38Nqx/uC+ontcek04kWFVYWWKX1x5bWBqZE0/"
"8dO7/+re89HS//Xx/uvK7+Cp/++1/+u74rWMhE8vilJBk1lYWVmNX1iCbF1y"
"VToz58Gs/9rH/tLF/+DG/+y2/uej/+Ki/92pq3hLjVcxlFtHkVZSbGmYTkNt"
"gmqCWzg22K2a/+TL/93C++C1++eq+OOi/+q489GsfVk3dlArkGRJkGFR3dnw"
"lIadXT5NSiEdvpOA/93C8+DA8+rB+PLA/PDI//fn//v47eHVpph9cVo7ZkYt"
"/f37//f678zQxpeRrYJx993G8OvO7vTQ8PbU/fvs/Pj/9/n/9///+P/t8OnM"
"4s2u".encode('ascii'),
'shape': (16, 16, 3)
}
test_case_2d_target = {
# [[0.96592583, -0.25881905, 2.34314575],
# [0.25881905, 0.96592583, -1.79795897]]
'data': "////19jdbXKIZFl3TC5GVzM1yaKR/9vN/ODU7vnR2v/M0v7N9f/2///9////"
"////////pau3Vlx2aF90aFFXkW5a8c+s/uTD6+a8sOiPauRTR/M9a/102P7n"
"/v7///v////9dYGPXmB1cWVzX0c7v5dz/t+z++q8wN+RWdQ9E98ECO8DINkj"
"keSW//76/+z49vf5YmR7X1duc11pdFRF6cGe/+fD9OvEoNyENuIuAv0AAP8B"
"Gu4Qd9thx8mi07Gly8nWZFmBc1l8bUtck3Jp//Te//Ll/f7wxP7DKdIvAPUA"
"BP8CE9wAVKspbWguWjgToZq8bVaOeE5+b0NcqoSD/vTo//T4/fP74f7of+19"
"KugkLPMeTNUvjrhWclgnlmhHc2yYb1SLdkt5jGF1u6OZ5uDU9/L4+/T88fni"
"zPirletwmfF21P6ox7mKhlI8klVDYmGAblp/eVJve1db1ci36+/e8PTz9Ozq"
"+OjO+fC18Pas3eKg+vDM06WVj1BHllZNXWF6aVxwbFFYkXps/fPY+v7v+P35"
"+fLq9+LF/+m3/eOw3L6a/+DO2qmbg0k7lVxLX2B/aF1tZVBLuaOM//Db/fr1"
"+Pn7//309+zM9+K19dyz5suu/N3IwpmDYjcXkGdJYFmFa19zWEE52ryk/+zd"
"/OPm/O/2/fPp/PLP8uS39uK9/+7Q6tC1lHNUXTkVr5aAUUVtemR5XT8368Ww"
"/9zM987I//Ho/+zM8OKx+ey3896z/+fDwJ9+f1o/gFtA2tDGbVlyVTQ/dlBH"
"6sau/uTL/9fB/uK9/+yx+eai/+qr/ee247OLilk7gk88kWFX+Pf13MPJj2Zk"
"kmZZ68as/eLE+eG9+uWw+uWk/OSk/uWs4rqJn2w/iE8xkVVKpXNy/////vj4"
"7NDPuJKF79G58ebK8e3I9fPD+++9/vHO/+vQr45vcEgkiVg4lV1QxaOi////"
"//////////////78+fnv9Pni8PfY/frn/Pj5/f3/9+7lp5Z8eFo4gVdB5drW"
"////".encode("ASCII"),
'shape': (16, 16, 3)
}
def get_2d_images(test_case):
try:
out = base64.decodebytes(test_case['data'])
except AttributeError:
out = base64.decodestring(test_case['data'])
out = np.frombuffer(out, dtype=np.uint8)
out = out.reshape(test_case['shape'])
return out, out.shape
def get_multiple_2d_images():
image_1, shape = get_2d_images(test_case_2d_1)
image_2 = image_1[::-1, ::-1]
image_3 = image_1[::-1, ]
image_4 = image_1[:, ::-1, ]
return np.stack([image_1, image_2, image_3, image_4]), [4] + list(shape)
def get_multiple_2d_rotated_targets():
image_1, shape = get_2d_images(test_case_2d_target)
image_2 = image_1[::-1, ::-1]
image_3 = image_1[::-1, ]
image_4 = image_1[:, ::-1, ]
return np.stack([image_1, image_2, image_3, image_4]), [4] + list(shape)
def get_multiple_2d_targets():
test_image, input_shape = get_multiple_2d_images()
test_target = np.array(test_image)
test_target[0] = test_target[0, ::-1]
test_target[1] = test_target[1, :, ::-1]
test_target[2] = test_target[2, ::-1, ::-1]
factor = 1.5
shape = input_shape[:]
shape[1] = np.floor(input_shape[1] * factor).astype(np.int)
shape[2] = np.floor(input_shape[2] * factor).astype(np.int)
from scipy.ndimage import zoom
zoomed_target = []
for img in test_target:
zoomed_target.append(zoom(img, [factor, factor, 1]))
test_target = np.stack(zoomed_target, axis=0).astype(np.uint8)
return test_target, shape
def get_multiple_3d_images():
image_1, shape = get_2d_images(test_case_2d_1)
image_2 = image_1[::-1, ::-1]
image_3 = image_1[::-1, ]
image_4 = image_1[:, ::-1, ]
image_2d = np.stack([image_1, image_2, image_3, image_4])
image_3d = np.expand_dims(image_2d, axis=1)
image_3d = np.concatenate([image_3d, image_3d], axis=1)
return image_3d, image_3d.shape
def get_multiple_3d_targets():
test_image, input_shape = get_multiple_2d_images()
test_target = np.array(test_image)
test_target[0] = test_target[0, ::-1]
test_target[1] = test_target[1, :, ::-1]
test_target[2] = test_target[2, ::-1, ::-1]
factor = 1.5
shape = input_shape[:]
shape[1] = np.floor(input_shape[1] * factor).astype(np.int)
shape[2] = np.floor(input_shape[2] * factor).astype(np.int)
from scipy.ndimage import zoom
zoomed_target = []
for img in test_target:
zoomed_target.append(zoom(img, [factor, factor, 1]))
test_target = np.stack(zoomed_target, axis=0).astype(np.uint8)
test_target = np.expand_dims(test_target, axis=1)
test_target = np.concatenate([test_target, test_target], axis=1)
return test_target, test_target.shape
def get_3d_input1():
test_case = tf.constant(
[[[[1, 2, -1], [3, 4, -2]], [[5, 6, -3], [7, 8, -4]]],
[[[9, 10, -5], [11, 12, -6]], [[13, 14, -7], [15, 16, -8]]]],
dtype=tf.float32)
return tf.expand_dims(test_case, 4)
class ResamplerGridWarperTest(NiftyNetTestCase):
def _test_correctness(
self, inputs, grid, interpolation, boundary, expected_value):
resampler = ResamplerLayer(
interpolation=interpolation, boundary=boundary)
out = resampler(inputs, grid)
with self.cached_session() as sess:
out_value = sess.run(out)
self.assertAllClose(expected_value, out_value)
def test_combined(self):
expected = [[[[[1], [-1]], [[3], [-2]]],
[[[5], [-3]], [[7], [-4]]]],
[[[[9.5], [-5]], [[11.5], [-6]]],
[[[13.5], [-7]], [[15.5], [-8]]]]]
affine_grid = AffineGridWarperLayer(source_shape=(2, 2, 3),
output_shape=(2, 2, 2))
test_grid = affine_grid(
tf.constant([[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, .5]],
dtype=tf.float32))
self._test_correctness(inputs=get_3d_input1(),
grid=test_grid,
interpolation='idw',
boundary='replicate',
expected_value=expected)
class image_test(NiftyNetTestCase):
def _test_grads_images(self,
interpolation='linear',
boundary='replicate',
ndim=2):
if ndim == 2:
test_image, input_shape = get_multiple_2d_images()
test_target, target_shape = get_multiple_2d_targets()
identity_affine = [[1., 0., 0., 0., 1., 0.]] * 4
else:
test_image, input_shape = get_multiple_3d_images()
test_target, target_shape = get_multiple_3d_targets()
identity_affine = [[1., 0., 0., 0., 1., 0.,
1., 0., 0., 0., 1., 0.]] * 4
affine_var = tf.get_variable('affine', initializer=identity_affine)
grid = AffineGridWarperLayer(source_shape=input_shape[1:-1],
output_shape=target_shape[1:-1],
constraints=None)
warp_coords = grid(affine_var)
resampler = ResamplerLayer(interpolation, boundary=boundary)
new_image = resampler(tf.constant(test_image, dtype=tf.float32),
warp_coords)
diff = tf.reduce_mean(tf.squared_difference(
new_image, tf.constant(test_target, dtype=tf.float32)))
optimiser = tf.train.AdagradOptimizer(0.01)
grads = optimiser.compute_gradients(diff)
opt = optimiser.apply_gradients(grads)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
init_val, affine_val = sess.run([diff, affine_var])
for _ in range(5):
_, diff_val, affine_val = sess.run([opt, diff, affine_var])
print('{}, {}'.format(diff_val, affine_val[0]))
self.assertGreater(init_val, diff_val)
def test_2d_linear_replicate(self):
self._test_grads_images('linear', 'replicate')
def test_2d_idw_replicate(self):
self._test_grads_images('idw', 'replicate')
def test_2d_linear_circular(self):
self._test_grads_images('linear', 'circular')
def test_2d_idw_circular(self):
self._test_grads_images('idw', 'circular')
def test_2d_linear_symmetric(self):
self._test_grads_images('linear', 'symmetric')
def test_2d_idw_symmetric(self):
self._test_grads_images('idw', 'symmetric')
def test_3d_linear_replicate(self):
self._test_grads_images('linear', 'replicate', ndim=3)
def test_3d_idw_replicate(self):
self._test_grads_images('idw', 'replicate', ndim=3)
def test_3d_linear_circular(self):
self._test_grads_images('linear', 'circular', ndim=3)
def test_3d_idw_circular(self):
self._test_grads_images('idw', 'circular', ndim=3)
def test_3d_linear_symmetric(self):
self._test_grads_images('linear', 'symmetric', ndim=3)
def test_3d_idw_symmetric(self):
self._test_grads_images('idw', 'symmetric', ndim=3)
class image_2D_test_converge(NiftyNetTestCase):
def _test_simple_2d_images(self,
interpolation='linear',
boundary='replicate'):
        # rotating around the center (8, 8) by 15 degrees
expected = [[0.96592583, -0.25881905, 2.34314575],
[0.25881905, 0.96592583, -1.79795897]]
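        # Derivation sketch (assuming the row-major affine layout [a, b, tx, c, d, ty] used here):
        # rotating by 15 degrees about the center c = (8, 8) means x' = R @ (x - c) + c,
        # so the translation terms are c - R @ c = (8 - 8*(cos15 - sin15), 8 - 8*(sin15 + cos15))
        # ~ (2.34314575, -1.79795897), which matches the expected matrix above.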
expected = np.asarray(expected).flatten()
test_image, input_shape = get_multiple_2d_images()
test_target, target_shape = get_multiple_2d_rotated_targets()
identity_affine = [[1., 0., 0., 0., 1., 0.],
[1., 0., 0., 0., 1., 0.],
[1., 0., 0., 0., 1., 0.],
[1., 0., 0., 0., 1., 0.]]
affine_var = tf.get_variable('affine', initializer=identity_affine)
grid = AffineGridWarperLayer(source_shape=input_shape[1:-1],
output_shape=target_shape[1:-1],
constraints=None)
warp_coords = grid(affine_var)
resampler = ResamplerLayer(interpolation, boundary=boundary)
new_image = resampler(tf.constant(test_image, dtype=tf.float32),
warp_coords)
diff = tf.reduce_mean(tf.squared_difference(
new_image, tf.constant(test_target, dtype=tf.float32)))
learning_rate = 0.05
if(interpolation == 'linear') and (boundary == 'zero'):
learning_rate = 0.0003
optimiser = tf.train.AdagradOptimizer(learning_rate)
grads = optimiser.compute_gradients(diff)
opt = optimiser.apply_gradients(grads)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
init_val, affine_val = sess.run([diff, affine_var])
            # compute the total absolute difference between the initial estimated parameters and the expected parameters
init_var_diff = np.sum(np.abs(affine_val[0] - expected))
for it in range(500):
_, diff_val, affine_val = sess.run([opt, diff, affine_var])
# print('{} diff: {}, {}'.format(it, diff_val, affine_val[0]))
# import matplotlib.pyplot as plt
# plt.figure()
# plt.imshow(test_target[0])
# plt.draw()
# plt.figure()
# plt.imshow(sess.run(new_image).astype(np.uint8)[0])
# plt.draw()
# plt.show()
self.assertGreater(init_val, diff_val)
            # compute the total absolute difference between the final estimated parameters and the expected parameters
var_diff = np.sum(np.abs(affine_val[0] - expected))
self.assertGreater(init_var_diff, var_diff)
print('{} {} -- diff {}'.format(
interpolation, boundary, var_diff))
print('{}'.format(affine_val[0]))
def test_2d_linear_zero_converge(self):
self._test_simple_2d_images('linear', 'zero')
def test_2d_linear_replicate_converge(self):
self._test_simple_2d_images('linear', 'replicate')
def test_2d_idw_replicate_converge(self):
self._test_simple_2d_images('idw', 'replicate')
def test_2d_linear_circular_converge(self):
self._test_simple_2d_images('linear', 'circular')
def test_2d_idw_circular_converge(self):
self._test_simple_2d_images('idw', 'circular')
def test_2d_linear_symmetric_converge(self):
self._test_simple_2d_images('linear', 'symmetric')
def test_2d_idw_symmetric_converge(self):
self._test_simple_2d_images('idw', 'symmetric')
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
|
schoolie/bokeh
|
bokeh/core/tests/test_properties.py
|
1
|
62944
|
from __future__ import absolute_import
import datetime
import unittest
import numpy as np
import pandas as pd
from copy import copy
import pytest
from bokeh.core.properties import (field, value,
NumberSpec, ColorSpec, Bool, Int, Float, Complex, String,
Regex, Seq, List, Dict, Tuple, Instance, Any, Interval, Either,
Enum, Color, DashPattern, Size, Percent, Angle, AngleSpec, StringSpec,
DistanceSpec, FontSizeSpec, Override, Include, MinMaxBounds,
DataDistanceSpec, ScreenDistanceSpec)
from bokeh.core.has_props import HasProps
from bokeh.models import Plot
class Basictest(unittest.TestCase):
def test_simple_class(self):
class Foo(HasProps):
x = Int(12)
y = String("hello")
z = List(Int, [1, 2, 3])
zz = Dict(String, Int)
s = String(None)
f = Foo()
self.assertEqual(f.x, 12)
self.assertEqual(f.y, "hello")
self.assert_(np.array_equal(np.array([1, 2, 3]), f.z))
self.assertEqual(f.s, None)
self.assertEqual(set(["x", "y", "z", "zz", "s"]), f.properties())
with_defaults = f.properties_with_values(include_defaults=True)
self.assertDictEqual(dict(x=12, y="hello", z=[1,2,3], zz={}, s=None), with_defaults)
without_defaults = f.properties_with_values(include_defaults=False)
self.assertDictEqual(dict(), without_defaults)
f.x = 18
self.assertEqual(f.x, 18)
f.y = "bar"
self.assertEqual(f.y, "bar")
without_defaults = f.properties_with_values(include_defaults=False)
self.assertDictEqual(dict(x=18, y="bar"), without_defaults)
f.z[0] = 100
without_defaults = f.properties_with_values(include_defaults=False)
self.assertDictEqual(dict(x=18, y="bar", z=[100,2,3]), without_defaults)
f.zz = {'a': 10}
without_defaults = f.properties_with_values(include_defaults=False)
self.assertDictEqual(dict(x=18, y="bar", z=[100,2,3], zz={'a': 10}), without_defaults)
def test_enum(self):
class Foo(HasProps):
x = Enum("blue", "red", "green") # the first item is the default
y = Enum("small", "medium", "large", default="large")
f = Foo()
self.assertEqual(f.x, "blue")
self.assertEqual(f.y, "large")
f.x = "red"
self.assertEqual(f.x, "red")
with self.assertRaises(ValueError):
f.x = "yellow"
f.y = "small"
self.assertEqual(f.y, "small")
with self.assertRaises(ValueError):
f.y = "yellow"
def test_inheritance(self):
class Base(HasProps):
x = Int(12)
y = String("hello")
class Child(Base):
z = Float(3.14)
c = Child()
self.assertEqual(frozenset(['x', 'y', 'z']), frozenset(c.properties()))
self.assertEqual(c.y, "hello")
def test_set(self):
class Foo(HasProps):
x = Int(12)
y = Enum("red", "blue", "green")
z = String("blah")
f = Foo()
self.assertEqual(f.x, 12)
self.assertEqual(f.y, "red")
self.assertEqual(f.z, "blah")
f.update(**dict(x=20, y="green", z="hello"))
self.assertEqual(f.x, 20)
self.assertEqual(f.y, "green")
self.assertEqual(f.z, "hello")
with self.assertRaises(ValueError):
f.update(y="orange")
def test_no_parens(self):
class Foo(HasProps):
x = Int
y = Int()
f = Foo()
self.assertEqual(f.x, f.y)
f.x = 13
self.assertEqual(f.x, 13)
def test_accurate_properties_sets(self):
class Base(HasProps):
num = Int(12)
container = List(String)
child = Instance(HasProps)
class Mixin(HasProps):
mixin_num = Int(12)
mixin_container = List(String)
mixin_child = Instance(HasProps)
class Sub(Base, Mixin):
sub_num = Int(12)
sub_container = List(String)
sub_child = Instance(HasProps)
b = Base()
self.assertEqual(set(["child"]),
b.properties_with_refs())
self.assertEqual(set(["container"]),
b.properties_containers())
self.assertEqual(set(["num", "container", "child"]),
b.properties())
self.assertEqual(set(["num", "container", "child"]),
b.properties(with_bases=True))
self.assertEqual(set(["num", "container", "child"]),
b.properties(with_bases=False))
m = Mixin()
self.assertEqual(set(["mixin_child"]),
m.properties_with_refs())
self.assertEqual(set(["mixin_container"]),
m.properties_containers())
self.assertEqual(set(["mixin_num", "mixin_container", "mixin_child"]),
m.properties())
self.assertEqual(set(["mixin_num", "mixin_container", "mixin_child"]),
m.properties(with_bases=True))
self.assertEqual(set(["mixin_num", "mixin_container", "mixin_child"]),
m.properties(with_bases=False))
s = Sub()
self.assertEqual(set(["child", "sub_child", "mixin_child"]),
s.properties_with_refs())
self.assertEqual(set(["container", "sub_container", "mixin_container"]),
s.properties_containers())
self.assertEqual(set(["num", "container", "child",
"mixin_num", "mixin_container", "mixin_child",
"sub_num", "sub_container", "sub_child"]),
s.properties())
self.assertEqual(set(["num", "container", "child",
"mixin_num", "mixin_container", "mixin_child",
"sub_num", "sub_container", "sub_child"]),
s.properties(with_bases=True))
self.assertEqual(set(["sub_num", "sub_container", "sub_child"]),
s.properties(with_bases=False))
# verify caching
self.assertIs(s.properties_with_refs(), s.properties_with_refs())
self.assertIs(s.properties_containers(), s.properties_containers())
self.assertIs(s.properties(), s.properties())
self.assertIs(s.properties(with_bases=True), s.properties(with_bases=True))
# this one isn't cached because we store it as a list __properties__ and wrap it
# in a new set every time
#self.assertIs(s.properties(with_bases=False), s.properties(with_bases=False))
def test_accurate_dataspecs(self):
class Base(HasProps):
num = NumberSpec(12)
not_a_dataspec = Float(10)
class Mixin(HasProps):
mixin_num = NumberSpec(14)
class Sub(Base, Mixin):
sub_num = NumberSpec(16)
base = Base()
mixin = Mixin()
sub = Sub()
self.assertEqual(set(["num"]), base.dataspecs())
self.assertEqual(set(["mixin_num"]), mixin.dataspecs())
self.assertEqual(set(["num", "mixin_num", "sub_num"]), sub.dataspecs())
self.assertDictEqual(dict(num=base.lookup("num")), base.dataspecs_with_props())
self.assertDictEqual(dict(mixin_num=mixin.lookup("mixin_num")), mixin.dataspecs_with_props())
self.assertDictEqual(dict(num=sub.lookup("num"),
mixin_num=sub.lookup("mixin_num"),
sub_num=sub.lookup("sub_num")),
sub.dataspecs_with_props())
def test_not_serialized(self):
class NotSerialized(HasProps):
x = Int(12, serialized=False)
y = String("hello")
o = NotSerialized()
self.assertEqual(o.x, 12)
self.assertEqual(o.y, 'hello')
# non-serialized props are still in the list of props
self.assertTrue('x' in o.properties())
self.assertTrue('y' in o.properties())
# but they aren't in the dict of props with values, since their
# values are not important (already included in other values,
# as with the _units properties)
self.assertTrue('x' not in o.properties_with_values(include_defaults=True))
self.assertTrue('y' in o.properties_with_values(include_defaults=True))
self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
o.x = 42
o.y = 'world'
self.assertTrue('x' not in o.properties_with_values(include_defaults=True))
self.assertTrue('y' in o.properties_with_values(include_defaults=True))
self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
self.assertTrue('y' in o.properties_with_values(include_defaults=False))
def test_readonly(self):
class Readonly(HasProps):
x = Int(12, readonly=True) # with default
y = Int(readonly=True) # without default
z = String("hello")
o = Readonly()
self.assertEqual(o.x, 12)
self.assertEqual(o.y, None)
self.assertEqual(o.z, 'hello')
# readonly props are still in the list of props
self.assertTrue('x' in o.properties())
self.assertTrue('y' in o.properties())
self.assertTrue('z' in o.properties())
# but they aren't in the dict of props with values
self.assertTrue('x' not in o.properties_with_values(include_defaults=True))
self.assertTrue('y' not in o.properties_with_values(include_defaults=True))
self.assertTrue('z' in o.properties_with_values(include_defaults=True))
self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
self.assertTrue('z' not in o.properties_with_values(include_defaults=False))
with self.assertRaises(RuntimeError):
o.x = 7
with self.assertRaises(RuntimeError):
o.y = 7
o.z = "xyz"
self.assertEqual(o.x, 12)
self.assertEqual(o.y, None)
self.assertEqual(o.z, 'xyz')
def test_include_defaults(self):
class IncludeDefaultsTest(HasProps):
x = Int(12)
y = String("hello")
o = IncludeDefaultsTest()
self.assertEqual(o.x, 12)
self.assertEqual(o.y, 'hello')
self.assertTrue('x' in o.properties_with_values(include_defaults=True))
self.assertTrue('y' in o.properties_with_values(include_defaults=True))
self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
o.x = 42
o.y = 'world'
self.assertTrue('x' in o.properties_with_values(include_defaults=True))
self.assertTrue('y' in o.properties_with_values(include_defaults=True))
self.assertTrue('x' in o.properties_with_values(include_defaults=False))
self.assertTrue('y' in o.properties_with_values(include_defaults=False))
def test_include_defaults_with_kwargs(self):
class IncludeDefaultsKwargsTest(HasProps):
x = Int(12)
y = String("hello")
o = IncludeDefaultsKwargsTest(x=14, y="world")
self.assertEqual(o.x, 14)
self.assertEqual(o.y, 'world')
self.assertTrue('x' in o.properties_with_values(include_defaults=True))
self.assertTrue('y' in o.properties_with_values(include_defaults=True))
self.assertTrue('x' in o.properties_with_values(include_defaults=False))
self.assertTrue('y' in o.properties_with_values(include_defaults=False))
def test_include_defaults_set_to_same(self):
class IncludeDefaultsSetToSameTest(HasProps):
x = Int(12)
y = String("hello")
o = IncludeDefaultsSetToSameTest()
self.assertTrue('x' in o.properties_with_values(include_defaults=True))
self.assertTrue('y' in o.properties_with_values(include_defaults=True))
self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
# this should no-op
o.x = 12
o.y = "hello"
self.assertTrue('x' in o.properties_with_values(include_defaults=True))
self.assertTrue('y' in o.properties_with_values(include_defaults=True))
self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
def test_override_defaults(self):
class FooBase(HasProps):
x = Int(12)
class FooSub(FooBase):
x = Override(default=14)
def func_default():
return 16
class FooSubSub(FooBase):
x = Override(default=func_default)
f_base = FooBase()
f_sub = FooSub()
f_sub_sub = FooSubSub()
self.assertEqual(f_base.x, 12)
self.assertEqual(f_sub.x, 14)
self.assertEqual(f_sub_sub.x, 16)
self.assertEqual(12, f_base.properties_with_values(include_defaults=True)['x'])
self.assertEqual(14, f_sub.properties_with_values(include_defaults=True)['x'])
self.assertEqual(16, f_sub_sub.properties_with_values(include_defaults=True)['x'])
self.assertFalse('x' in f_base.properties_with_values(include_defaults=False))
self.assertFalse('x' in f_sub.properties_with_values(include_defaults=False))
self.assertFalse('x' in f_sub_sub.properties_with_values(include_defaults=False))
def test_include_delegate(self):
class IsDelegate(HasProps):
x = Int(12)
y = String("hello")
class IncludesDelegateWithPrefix(HasProps):
z = Include(IsDelegate, use_prefix=True)
z_y = Int(57) # override the Include
class IncludesDelegateWithoutPrefix(HasProps):
z = Include(IsDelegate, use_prefix=False)
y = Int(42) # override the Include
class IncludesDelegateWithoutPrefixUsingOverride(HasProps):
z = Include(IsDelegate, use_prefix=False)
y = Override(default="world") # override the Include changing just the default
o = IncludesDelegateWithoutPrefix()
self.assertEqual(o.x, 12)
self.assertEqual(o.y, 42)
self.assertFalse(hasattr(o, 'z'))
self.assertTrue('x' in o.properties_with_values(include_defaults=True))
self.assertTrue('y' in o.properties_with_values(include_defaults=True))
self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
o = IncludesDelegateWithoutPrefixUsingOverride()
self.assertEqual(o.x, 12)
self.assertEqual(o.y, 'world')
self.assertFalse(hasattr(o, 'z'))
self.assertTrue('x' in o.properties_with_values(include_defaults=True))
self.assertTrue('y' in o.properties_with_values(include_defaults=True))
self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
o2 = IncludesDelegateWithPrefix()
self.assertEqual(o2.z_x, 12)
self.assertEqual(o2.z_y, 57)
self.assertFalse(hasattr(o2, 'z'))
self.assertFalse(hasattr(o2, 'x'))
self.assertFalse(hasattr(o2, 'y'))
self.assertFalse('z' in o2.properties_with_values(include_defaults=True))
self.assertFalse('x' in o2.properties_with_values(include_defaults=True))
self.assertFalse('y' in o2.properties_with_values(include_defaults=True))
self.assertTrue('z_x' in o2.properties_with_values(include_defaults=True))
self.assertTrue('z_y' in o2.properties_with_values(include_defaults=True))
self.assertTrue('z_x' not in o2.properties_with_values(include_defaults=False))
self.assertTrue('z_y' not in o2.properties_with_values(include_defaults=False))
# def test_kwargs_init(self):
# class Foo(HasProps):
# x = String
# y = Int
# z = Float
# f = Foo(x = "hello", y = 14)
# self.assertEqual(f.x, "hello")
# self.assertEqual(f.y, 14)
# with self.assertRaises(TypeError):
# # This should raise a TypeError: object.__init__() takes no parameters
# g = Foo(z = 3.14, q = "blah")
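# A NumberSpec accepts either a field name (a string, serialized as
# {"field": name}) or a literal number (serialized as {"value": number}).
# Illustrative sketch of the behavior exercised by the tests below:
#   f.x = "xfield"   ->  serializable_value: {"field": "xfield"}
#   f.x = 12         ->  serializable_value: {"value": 12}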
class TestNumberSpec(unittest.TestCase):
def test_field(self):
class Foo(HasProps):
x = NumberSpec("xfield")
f = Foo()
self.assertEqual(f.x, "xfield")
self.assertDictEqual(Foo.__dict__["x"].serializable_value(f), {"field": "xfield"})
f.x = "my_x"
self.assertEqual(f.x, "my_x")
self.assertDictEqual(Foo.__dict__["x"].serializable_value(f), {"field": "my_x"})
def test_value(self):
class Foo(HasProps):
x = NumberSpec("xfield")
f = Foo()
self.assertEqual(f.x, "xfield")
f.x = 12
self.assertEqual(f.x, 12)
self.assertDictEqual(Foo.__dict__["x"].serializable_value(f), {"value": 12})
f.x = 15
self.assertEqual(f.x, 15)
self.assertDictEqual(Foo.__dict__["x"].serializable_value(f), {"value": 15})
f.x = dict(value=32)
self.assertDictEqual(Foo.__dict__["x"].serializable_value(f), {"value": 32})
f.x = None
self.assertIs(Foo.__dict__["x"].serializable_value(f), None)
def test_default(self):
class Foo(HasProps):
y = NumberSpec(default=12)
f = Foo()
self.assertEqual(f.y, 12)
self.assertDictEqual(Foo.__dict__["y"].serializable_value(f), {"value": 12})
f.y = "y1"
self.assertEqual(f.y, "y1")
# Once we set a concrete value, the default is ignored, because it is unused
f.y = 32
self.assertEqual(f.y, 32)
self.assertDictEqual(Foo.__dict__["y"].serializable_value(f), {"value": 32})
def test_multiple_instances(self):
class Foo(HasProps):
x = NumberSpec("xfield")
a = Foo()
b = Foo()
a.x = 13
b.x = 14
self.assertEqual(a.x, 13)
self.assertEqual(b.x, 14)
self.assertDictEqual(Foo.__dict__["x"].serializable_value(a), {"value": 13})
self.assertDictEqual(Foo.__dict__["x"].serializable_value(b), {"value": 14})
b.x = {"field": "x3"}
self.assertDictEqual(Foo.__dict__["x"].serializable_value(a), {"value": 13})
self.assertDictEqual(Foo.__dict__["x"].serializable_value(b), {"field": "x3"})
def test_autocreate_no_parens(self):
class Foo(HasProps):
x = NumberSpec
a = Foo()
self.assertIs(a.x, None)
a.x = 14
self.assertEqual(a.x, 14)
def test_set_from_json_keeps_mode(self):
class Foo(HasProps):
x = NumberSpec(default=None)
a = Foo()
self.assertIs(a.x, None)
# set as a value
a.x = 14
self.assertEqual(a.x, 14)
# set_from_json keeps the previous dict-ness or lack thereof
a.set_from_json('x', dict(value=16))
self.assertEqual(a.x, 16)
# but regular assignment overwrites the previous dict-ness
a.x = dict(value=17)
self.assertDictEqual(a.x, dict(value=17))
# set as a field
a.x = "bar"
self.assertEqual(a.x, "bar")
# set_from_json keeps the previous dict-ness or lack thereof
a.set_from_json('x', dict(field="foo"))
self.assertEqual(a.x, "foo")
# but regular assignment overwrites the previous dict-ness
a.x = dict(field="baz")
self.assertDictEqual(a.x, dict(field="baz"))
class TestFontSizeSpec(unittest.TestCase):
def test_font_size_from_string(self):
class Foo(HasProps):
x = FontSizeSpec(default=None)
css_units = "%|em|ex|ch|ic|rem|vw|vh|vi|vb|vmin|vmax|cm|mm|q|in|pc|pt|px"
a = Foo()
self.assertIs(a.x, None)
for unit in css_units.split("|"):
v = '10%s' % unit
a.x = v
self.assertEqual(a.x, dict(value=v))
self.assertEqual(a.lookup('x').serializable_value(a), dict(value=v))
v = '10.2%s' % unit
a.x = v
self.assertEqual(a.x, dict(value=v))
self.assertEqual(a.lookup('x').serializable_value(a), dict(value=v))
f = '_10%s' % unit
a.x = f
self.assertEqual(a.x, f)
self.assertEqual(a.lookup('x').serializable_value(a), dict(field=f))
f = '_10.2%s' % unit
a.x = f
self.assertEqual(a.x, f)
self.assertEqual(a.lookup('x').serializable_value(a), dict(field=f))
for unit in css_units.upper().split("|"):
v = '10%s' % unit
a.x = v
self.assertEqual(a.x, dict(value=v))
self.assertEqual(a.lookup('x').serializable_value(a), dict(value=v))
v = '10.2%s' % unit
a.x = v
self.assertEqual(a.x, dict(value=v))
self.assertEqual(a.lookup('x').serializable_value(a), dict(value=v))
f = '_10%s' % unit
a.x = f
self.assertEqual(a.x, f)
self.assertEqual(a.lookup('x').serializable_value(a), dict(field=f))
f = '_10.2%s' % unit
a.x = f
self.assertEqual(a.x, f)
self.assertEqual(a.lookup('x').serializable_value(a), dict(field=f))
def test_bad_font_size_values(self):
class Foo(HasProps):
x = FontSizeSpec(default=None)
a = Foo()
with self.assertRaises(ValueError):
a.x = "6"
with self.assertRaises(ValueError):
a.x = 6
with self.assertRaises(ValueError):
a.x = ""
def test_fields(self):
class Foo(HasProps):
x = FontSizeSpec(default=None)
a = Foo()
a.x = "_120"
self.assertEqual(a.x, "_120")
a.x = dict(field="_120")
self.assertEqual(a.x, dict(field="_120"))
a.x = "foo"
self.assertEqual(a.x, "foo")
a.x = dict(field="foo")
self.assertEqual(a.x, dict(field="foo"))
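# FontSizeSpec treats strings that parse as CSS font sizes (e.g. "12pt",
# "1.5em") as literal values and any other string as a field name.
# Illustrative sketch of the two interpretations covered above:
#   a.x = "12pt"    ->  serialized as {"value": "12pt"}
#   a.x = "_12pt"   ->  serialized as {"field": "_12pt"}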
class TestAngleSpec(unittest.TestCase):
def test_default_none(self):
class Foo(HasProps):
x = AngleSpec(None)
a = Foo()
self.assertIs(a.x, None)
self.assertEqual(a.x_units, 'rad')
a.x = 14
self.assertEqual(a.x, 14)
self.assertEqual(a.x_units, 'rad')
def test_autocreate_no_parens(self):
class Foo(HasProps):
x = AngleSpec
a = Foo()
self.assertIs(a.x, None)
self.assertEqual(a.x_units, 'rad')
a.x = 14
self.assertEqual(a.x, 14)
self.assertEqual(a.x_units, 'rad')
def test_default_value(self):
class Foo(HasProps):
x = AngleSpec(default=14)
a = Foo()
self.assertEqual(a.x, 14)
self.assertEqual(a.x_units, 'rad')
def test_setting_dict_sets_units(self):
class Foo(HasProps):
x = AngleSpec(default=14)
a = Foo()
self.assertEqual(a.x, 14)
self.assertEqual(a.x_units, 'rad')
a.x = { 'value' : 180, 'units' : 'deg' }
self.assertDictEqual(a.x, { 'value' : 180 })
self.assertEqual(a.x_units, 'deg')
def test_setting_json_sets_units_keeps_dictness(self):
class Foo(HasProps):
x = AngleSpec(default=14)
a = Foo()
self.assertEqual(a.x, 14)
self.assertEqual(a.x_units, 'rad')
a.set_from_json('x', { 'value' : 180, 'units' : 'deg' })
self.assertEqual(a.x, 180)
self.assertEqual(a.x_units, 'deg')
def test_setting_dict_does_not_modify_original_dict(self):
class Foo(HasProps):
x = AngleSpec(default=14)
a = Foo()
self.assertEqual(a.x, 14)
self.assertEqual(a.x_units, 'rad')
new_value = { 'value' : 180, 'units' : 'deg' }
new_value_copy = copy(new_value)
self.assertDictEqual(new_value_copy, new_value)
a.x = new_value
self.assertDictEqual(a.x, { 'value' : 180 })
self.assertEqual(a.x_units, 'deg')
self.assertDictEqual(new_value_copy, new_value)
class TestDistanceSpec(unittest.TestCase):
def test_default_none(self):
class Foo(HasProps):
x = DistanceSpec(None)
a = Foo()
self.assertIs(a.x, None)
self.assertEqual(a.x_units, 'data')
a.x = 14
self.assertEqual(a.x, 14)
self.assertEqual(a.x_units, 'data')
def test_autocreate_no_parens(self):
class Foo(HasProps):
x = DistanceSpec
a = Foo()
self.assertIs(a.x, None)
self.assertEqual(a.x_units, 'data')
a.x = 14
self.assertEqual(a.x, 14)
self.assertEqual(a.x_units, 'data')
def test_default_value(self):
class Foo(HasProps):
x = DistanceSpec(default=14)
a = Foo()
self.assertEqual(a.x, 14)
self.assertEqual(a.x_units, 'data')
class TestColorSpec(unittest.TestCase):
def test_field(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
self.assertEqual(f.col, "colorfield")
self.assertDictEqual(desc.serializable_value(f), {"field": "colorfield"})
f.col = "myfield"
self.assertEqual(f.col, "myfield")
self.assertDictEqual(desc.serializable_value(f), {"field": "myfield"})
def test_field_default(self):
class Foo(HasProps):
col = ColorSpec(default="red")
desc = Foo.__dict__["col"]
f = Foo()
self.assertEqual(f.col, "red")
self.assertDictEqual(desc.serializable_value(f), {"value": "red"})
f.col = "myfield"
self.assertEqual(f.col, "myfield")
self.assertDictEqual(desc.serializable_value(f), {"field": "myfield"})
def test_default_tuple(self):
class Foo(HasProps):
col = ColorSpec(default=(128, 255, 124))
desc = Foo.__dict__["col"]
f = Foo()
self.assertEqual(f.col, (128, 255, 124))
self.assertDictEqual(desc.serializable_value(f), {"value": "rgb(128, 255, 124)"})
def test_fixed_value(self):
class Foo(HasProps):
col = ColorSpec("gray")
desc = Foo.__dict__["col"]
f = Foo()
self.assertEqual(f.col, "gray")
self.assertDictEqual(desc.serializable_value(f), {"value": "gray"})
def test_named_value(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = "red"
self.assertEqual(f.col, "red")
self.assertDictEqual(desc.serializable_value(f), {"value": "red"})
f.col = "forestgreen"
self.assertEqual(f.col, "forestgreen")
self.assertDictEqual(desc.serializable_value(f), {"value": "forestgreen"})
def test_case_insensitive_named_value(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = "RED"
self.assertEqual(f.col, "RED")
self.assertDictEqual(desc.serializable_value(f), {"value": "RED"})
f.col = "ForestGreen"
self.assertEqual(f.col, "ForestGreen")
self.assertDictEqual(desc.serializable_value(f), {"value": "ForestGreen"})
def test_named_value_set_none(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = None
self.assertDictEqual(desc.serializable_value(f), {"value": None})
def test_named_value_unset(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
self.assertDictEqual(desc.serializable_value(f), {"field": "colorfield"})
def test_named_color_overriding_default(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = "forestgreen"
self.assertEqual(f.col, "forestgreen")
self.assertDictEqual(desc.serializable_value(f), {"value": "forestgreen"})
f.col = "myfield"
self.assertEqual(f.col, "myfield")
self.assertDictEqual(desc.serializable_value(f), {"field": "myfield"})
def test_hex_value(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = "#FF004A"
self.assertEqual(f.col, "#FF004A")
self.assertDictEqual(desc.serializable_value(f), {"value": "#FF004A"})
f.col = "myfield"
self.assertEqual(f.col, "myfield")
self.assertDictEqual(desc.serializable_value(f), {"field": "myfield"})
def test_tuple_value(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = (128, 200, 255)
self.assertEqual(f.col, (128, 200, 255))
self.assertDictEqual(desc.serializable_value(f), {"value": "rgb(128, 200, 255)"})
f.col = "myfield"
self.assertEqual(f.col, "myfield")
self.assertDictEqual(desc.serializable_value(f), {"field": "myfield"})
f.col = (100, 150, 200, 0.5)
self.assertEqual(f.col, (100, 150, 200, 0.5))
self.assertDictEqual(desc.serializable_value(f), {"value": "rgba(100, 150, 200, 0.5)"})
def test_set_dict(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = {"field": "myfield"}
self.assertDictEqual(f.col, {"field": "myfield"})
f.col = "field2"
self.assertEqual(f.col, "field2")
self.assertDictEqual(desc.serializable_value(f), {"field": "field2"})
class TestDashPattern(unittest.TestCase):
def test_named(self):
class Foo(HasProps):
pat = DashPattern
f = Foo()
self.assertEqual(f.pat, [])
f.pat = "solid"
self.assertEqual(f.pat, [])
f.pat = "dashed"
self.assertEqual(f.pat, [6])
f.pat = "dotted"
self.assertEqual(f.pat, [2, 4])
f.pat = "dotdash"
self.assertEqual(f.pat, [2, 4, 6, 4])
f.pat = "dashdot"
self.assertEqual(f.pat, [6, 4, 2, 4])
def test_string(self):
class Foo(HasProps):
pat = DashPattern
f = Foo()
f.pat = ""
self.assertEqual(f.pat, [])
f.pat = "2"
self.assertEqual(f.pat, [2])
f.pat = "2 4"
self.assertEqual(f.pat, [2, 4])
f.pat = "2 4 6"
self.assertEqual(f.pat, [2, 4, 6])
with self.assertRaises(ValueError):
f.pat = "abc 6"
def test_list(self):
class Foo(HasProps):
pat = DashPattern
f = Foo()
f.pat = ()
self.assertEqual(f.pat, ())
f.pat = (2,)
self.assertEqual(f.pat, (2,))
f.pat = (2, 4)
self.assertEqual(f.pat, (2, 4))
f.pat = (2, 4, 6)
self.assertEqual(f.pat, (2, 4, 6))
with self.assertRaises(ValueError):
f.pat = (2, 4.2)
with self.assertRaises(ValueError):
f.pat = (2, "a")
def test_invalid(self):
class Foo(HasProps):
pat = DashPattern
f = Foo()
with self.assertRaises(ValueError):
f.pat = 10
with self.assertRaises(ValueError):
f.pat = 10.1
with self.assertRaises(ValueError):
f.pat = {}
class Foo(HasProps):
pass
class Bar(HasProps):
pass
class Baz(HasProps):
pass
class TestProperties(unittest.TestCase):
def test_Any(self):
prop = Any()
self.assertTrue(prop.is_valid(None))
self.assertTrue(prop.is_valid(False))
self.assertTrue(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertTrue(prop.is_valid(1.0+1.0j))
self.assertTrue(prop.is_valid(""))
self.assertTrue(prop.is_valid(()))
self.assertTrue(prop.is_valid([]))
self.assertTrue(prop.is_valid({}))
self.assertTrue(prop.is_valid(Foo()))
def test_Bool(self):
prop = Bool()
self.assertTrue(prop.is_valid(None))
self.assertTrue(prop.is_valid(False))
self.assertTrue(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid(np.bool8(False)))
self.assertTrue(prop.is_valid(np.bool8(True)))
self.assertFalse(prop.is_valid(np.int8(0)))
self.assertFalse(prop.is_valid(np.int8(1)))
self.assertFalse(prop.is_valid(np.int16(0)))
self.assertFalse(prop.is_valid(np.int16(1)))
self.assertFalse(prop.is_valid(np.int32(0)))
self.assertFalse(prop.is_valid(np.int32(1)))
self.assertFalse(prop.is_valid(np.int64(0)))
self.assertFalse(prop.is_valid(np.int64(1)))
self.assertFalse(prop.is_valid(np.uint8(0)))
self.assertFalse(prop.is_valid(np.uint8(1)))
self.assertFalse(prop.is_valid(np.uint16(0)))
self.assertFalse(prop.is_valid(np.uint16(1)))
self.assertFalse(prop.is_valid(np.uint32(0)))
self.assertFalse(prop.is_valid(np.uint32(1)))
self.assertFalse(prop.is_valid(np.uint64(0)))
self.assertFalse(prop.is_valid(np.uint64(1)))
self.assertFalse(prop.is_valid(np.float16(0)))
self.assertFalse(prop.is_valid(np.float16(1)))
self.assertFalse(prop.is_valid(np.float32(0)))
self.assertFalse(prop.is_valid(np.float32(1)))
self.assertFalse(prop.is_valid(np.float64(0)))
self.assertFalse(prop.is_valid(np.float64(1)))
self.assertFalse(prop.is_valid(np.complex64(1.0+1.0j)))
self.assertFalse(prop.is_valid(np.complex128(1.0+1.0j)))
if hasattr(np, "complex256"):
self.assertFalse(prop.is_valid(np.complex256(1.0+1.0j)))
def test_Int(self):
prop = Int()
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
# TODO: self.assertFalse(prop.is_valid(np.bool8(False)))
# TODO: self.assertFalse(prop.is_valid(np.bool8(True)))
self.assertTrue(prop.is_valid(np.int8(0)))
self.assertTrue(prop.is_valid(np.int8(1)))
self.assertTrue(prop.is_valid(np.int16(0)))
self.assertTrue(prop.is_valid(np.int16(1)))
self.assertTrue(prop.is_valid(np.int32(0)))
self.assertTrue(prop.is_valid(np.int32(1)))
self.assertTrue(prop.is_valid(np.int64(0)))
self.assertTrue(prop.is_valid(np.int64(1)))
self.assertTrue(prop.is_valid(np.uint8(0)))
self.assertTrue(prop.is_valid(np.uint8(1)))
self.assertTrue(prop.is_valid(np.uint16(0)))
self.assertTrue(prop.is_valid(np.uint16(1)))
self.assertTrue(prop.is_valid(np.uint32(0)))
self.assertTrue(prop.is_valid(np.uint32(1)))
self.assertTrue(prop.is_valid(np.uint64(0)))
self.assertTrue(prop.is_valid(np.uint64(1)))
self.assertFalse(prop.is_valid(np.float16(0)))
self.assertFalse(prop.is_valid(np.float16(1)))
self.assertFalse(prop.is_valid(np.float32(0)))
self.assertFalse(prop.is_valid(np.float32(1)))
self.assertFalse(prop.is_valid(np.float64(0)))
self.assertFalse(prop.is_valid(np.float64(1)))
self.assertFalse(prop.is_valid(np.complex64(1.0+1.0j)))
self.assertFalse(prop.is_valid(np.complex128(1.0+1.0j)))
if hasattr(np, "complex256"):
self.assertFalse(prop.is_valid(np.complex256(1.0+1.0j)))
def test_Float(self):
prop = Float()
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
# TODO: self.assertFalse(prop.is_valid(np.bool8(False)))
# TODO: self.assertFalse(prop.is_valid(np.bool8(True)))
self.assertTrue(prop.is_valid(np.int8(0)))
self.assertTrue(prop.is_valid(np.int8(1)))
self.assertTrue(prop.is_valid(np.int16(0)))
self.assertTrue(prop.is_valid(np.int16(1)))
self.assertTrue(prop.is_valid(np.int32(0)))
self.assertTrue(prop.is_valid(np.int32(1)))
self.assertTrue(prop.is_valid(np.int64(0)))
self.assertTrue(prop.is_valid(np.int64(1)))
self.assertTrue(prop.is_valid(np.uint8(0)))
self.assertTrue(prop.is_valid(np.uint8(1)))
self.assertTrue(prop.is_valid(np.uint16(0)))
self.assertTrue(prop.is_valid(np.uint16(1)))
self.assertTrue(prop.is_valid(np.uint32(0)))
self.assertTrue(prop.is_valid(np.uint32(1)))
self.assertTrue(prop.is_valid(np.uint64(0)))
self.assertTrue(prop.is_valid(np.uint64(1)))
self.assertTrue(prop.is_valid(np.float16(0)))
self.assertTrue(prop.is_valid(np.float16(1)))
self.assertTrue(prop.is_valid(np.float32(0)))
self.assertTrue(prop.is_valid(np.float32(1)))
self.assertTrue(prop.is_valid(np.float64(0)))
self.assertTrue(prop.is_valid(np.float64(1)))
self.assertFalse(prop.is_valid(np.complex64(1.0+1.0j)))
self.assertFalse(prop.is_valid(np.complex128(1.0+1.0j)))
if hasattr(np, "complex256"):
self.assertFalse(prop.is_valid(np.complex256(1.0+1.0j)))
def test_Complex(self):
prop = Complex()
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertTrue(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
# TODO: self.assertFalse(prop.is_valid(np.bool8(False)))
# TODO: self.assertFalse(prop.is_valid(np.bool8(True)))
self.assertTrue(prop.is_valid(np.int8(0)))
self.assertTrue(prop.is_valid(np.int8(1)))
self.assertTrue(prop.is_valid(np.int16(0)))
self.assertTrue(prop.is_valid(np.int16(1)))
self.assertTrue(prop.is_valid(np.int32(0)))
self.assertTrue(prop.is_valid(np.int32(1)))
self.assertTrue(prop.is_valid(np.int64(0)))
self.assertTrue(prop.is_valid(np.int64(1)))
self.assertTrue(prop.is_valid(np.uint8(0)))
self.assertTrue(prop.is_valid(np.uint8(1)))
self.assertTrue(prop.is_valid(np.uint16(0)))
self.assertTrue(prop.is_valid(np.uint16(1)))
self.assertTrue(prop.is_valid(np.uint32(0)))
self.assertTrue(prop.is_valid(np.uint32(1)))
self.assertTrue(prop.is_valid(np.uint64(0)))
self.assertTrue(prop.is_valid(np.uint64(1)))
self.assertTrue(prop.is_valid(np.float16(0)))
self.assertTrue(prop.is_valid(np.float16(1)))
self.assertTrue(prop.is_valid(np.float32(0)))
self.assertTrue(prop.is_valid(np.float32(1)))
self.assertTrue(prop.is_valid(np.float64(0)))
self.assertTrue(prop.is_valid(np.float64(1)))
self.assertTrue(prop.is_valid(np.complex64(1.0+1.0j)))
self.assertTrue(prop.is_valid(np.complex128(1.0+1.0j)))
if hasattr(np, "complex256"):
self.assertTrue(prop.is_valid(np.complex256(1.0+1.0j)))
def test_String(self):
prop = String()
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertTrue(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_Regex(self):
with self.assertRaises(TypeError):
prop = Regex()
prop = Regex("^x*$")
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertTrue(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_Seq(self):
with self.assertRaises(TypeError):
prop = Seq()
prop = Seq(Int)
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertTrue(prop.is_valid(()))
self.assertTrue(prop.is_valid([]))
self.assertTrue(prop.is_valid(np.array([])))
self.assertFalse(prop.is_valid(set([])))
self.assertFalse(prop.is_valid({}))
self.assertTrue(prop.is_valid((1, 2)))
self.assertTrue(prop.is_valid([1, 2]))
self.assertTrue(prop.is_valid(np.array([1, 2])))
self.assertFalse(prop.is_valid({1, 2}))
self.assertFalse(prop.is_valid({1: 2}))
self.assertFalse(prop.is_valid(Foo()))
df = pd.DataFrame([1, 2])
self.assertTrue(prop.is_valid(df.index))
self.assertTrue(prop.is_valid(df.iloc[0]))
def test_List(self):
with self.assertRaises(TypeError):
prop = List()
prop = List(Int)
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertTrue(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_Dict(self):
with self.assertRaises(TypeError):
prop = Dict()
prop = Dict(String, List(Int))
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertTrue(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_Tuple(self):
with self.assertRaises(TypeError):
prop = Tuple()
with self.assertRaises(TypeError):
prop = Tuple(Int)
prop = Tuple(Int, String, List(Int))
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid((1, "", [1, 2, 3])))
self.assertFalse(prop.is_valid((1.0, "", [1, 2, 3])))
self.assertFalse(prop.is_valid((1, True, [1, 2, 3])))
self.assertFalse(prop.is_valid((1, "", (1, 2, 3))))
self.assertFalse(prop.is_valid((1, "", [1, 2, "xyz"])))
def test_Instance(self):
with self.assertRaises(TypeError):
prop = Instance()
prop = Instance(Foo)
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertTrue(prop.is_valid(Foo()))
self.assertFalse(prop.is_valid(Bar()))
self.assertFalse(prop.is_valid(Baz()))
def test_Instance_from_json(self):
class MapOptions(HasProps):
lat = Float
lng = Float
zoom = Int(12)
v1 = Instance(MapOptions).from_json(dict(lat=1, lng=2))
v2 = MapOptions(lat=1, lng=2)
self.assertTrue(v1.equals(v2))
def test_Interval(self):
with self.assertRaises(TypeError):
prop = Interval()
with self.assertRaises(ValueError):
prop = Interval(Int, 0.0, 1.0)
prop = Interval(Int, 0, 255)
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid(127))
self.assertFalse(prop.is_valid(-1))
self.assertFalse(prop.is_valid(256))
prop = Interval(Float, 0.0, 1.0)
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid(0.5))
self.assertFalse(prop.is_valid(-0.001))
self.assertFalse(prop.is_valid( 1.001))
def test_Either(self):
with self.assertRaises(TypeError):
prop = Either()
prop = Either(Interval(Int, 0, 100), Regex("^x*$"), List(Int))
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertTrue(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertTrue(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid(100))
self.assertFalse(prop.is_valid(-100))
self.assertTrue(prop.is_valid("xxx"))
self.assertFalse(prop.is_valid("yyy"))
self.assertTrue(prop.is_valid([1, 2, 3]))
self.assertFalse(prop.is_valid([1, 2, ""]))
def test_Enum(self):
with self.assertRaises(TypeError):
prop = Enum()
with self.assertRaises(TypeError):
prop = Enum("red", "green", 1)
with self.assertRaises(TypeError):
prop = Enum("red", "green", "red")
prop = Enum("red", "green", "blue")
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid("red"))
self.assertTrue(prop.is_valid("green"))
self.assertTrue(prop.is_valid("blue"))
self.assertFalse(prop.is_valid("RED"))
self.assertFalse(prop.is_valid("GREEN"))
self.assertFalse(prop.is_valid("BLUE"))
self.assertFalse(prop.is_valid(" red"))
self.assertFalse(prop.is_valid(" green"))
self.assertFalse(prop.is_valid(" blue"))
from bokeh.core.enums import LineJoin
prop = Enum(LineJoin)
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid("miter"))
self.assertTrue(prop.is_valid("round"))
self.assertTrue(prop.is_valid("bevel"))
self.assertFalse(prop.is_valid("MITER"))
self.assertFalse(prop.is_valid("ROUND"))
self.assertFalse(prop.is_valid("BEVEL"))
self.assertFalse(prop.is_valid(" miter"))
self.assertFalse(prop.is_valid(" round"))
self.assertFalse(prop.is_valid(" bevel"))
from bokeh.core.enums import NamedColor
prop = Enum(NamedColor)
self.assertTrue(prop.is_valid("red"))
self.assertTrue(prop.is_valid("Red"))
self.assertTrue(prop.is_valid("RED"))
def test_Color(self):
prop = Color()
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid((0, 127, 255)))
self.assertFalse(prop.is_valid((0, -127, 255)))
self.assertFalse(prop.is_valid((0, 127)))
self.assertFalse(prop.is_valid((0, 127, 1.0)))
self.assertFalse(prop.is_valid((0, 127, 255, 255)))
self.assertTrue(prop.is_valid((0, 127, 255, 1.0)))
self.assertTrue(prop.is_valid("#00aaff"))
self.assertTrue(prop.is_valid("#00AAFF"))
self.assertTrue(prop.is_valid("#00AaFf"))
self.assertFalse(prop.is_valid("00aaff"))
self.assertFalse(prop.is_valid("00AAFF"))
self.assertFalse(prop.is_valid("00AaFf"))
self.assertFalse(prop.is_valid("#00AaFg"))
self.assertFalse(prop.is_valid("#00AaFff"))
self.assertTrue(prop.is_valid("blue"))
self.assertTrue(prop.is_valid("BLUE"))
self.assertFalse(prop.is_valid("foobar"))
self.assertEqual(prop.transform((0, 127, 255)), "rgb(0, 127, 255)")
self.assertEqual(prop.transform((0, 127, 255, 0.1)), "rgba(0, 127, 255, 0.1)")
def test_DashPattern(self):
prop = DashPattern()
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertTrue(prop.is_valid(""))
self.assertTrue(prop.is_valid(()))
self.assertTrue(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid("solid"))
self.assertTrue(prop.is_valid("dashed"))
self.assertTrue(prop.is_valid("dotted"))
self.assertTrue(prop.is_valid("dotdash"))
self.assertTrue(prop.is_valid("dashdot"))
self.assertFalse(prop.is_valid("DASHDOT"))
self.assertTrue(prop.is_valid([1, 2, 3]))
self.assertFalse(prop.is_valid([1, 2, 3.0]))
self.assertTrue(prop.is_valid("1 2 3"))
self.assertFalse(prop.is_valid("1 2 x"))
def test_Size(self):
prop = Size()
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid(100))
self.assertTrue(prop.is_valid(100.1))
self.assertFalse(prop.is_valid(-100))
self.assertFalse(prop.is_valid(-0.001))
def test_Percent(self):
prop = Percent()
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid(0.5))
self.assertFalse(prop.is_valid(-0.001))
self.assertFalse(prop.is_valid( 1.001))
def test_Angle(self):
prop = Angle()
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_MinMaxBounds_with_no_datetime(self):
prop = MinMaxBounds(accept_datetime=False)
# Valid values
self.assertTrue(prop.is_valid('auto'))
self.assertTrue(prop.is_valid(None))
self.assertTrue(prop.is_valid((12, 13)))
self.assertTrue(prop.is_valid((-32, -13)))
self.assertTrue(prop.is_valid((12.1, 13.1)))
self.assertTrue(prop.is_valid((None, 13.1)))
self.assertTrue(prop.is_valid((-22, None)))
# Invalid values
self.assertFalse(prop.is_valid('string'))
self.assertFalse(prop.is_valid(12))
self.assertFalse(prop.is_valid(('a', 'b')))
self.assertFalse(prop.is_valid((13, 12)))
self.assertFalse(prop.is_valid((13.1, 12.2)))
self.assertFalse(prop.is_valid((datetime.date(2012, 10, 1), datetime.date(2012, 12, 2))))
def test_MinMaxBounds_with_datetime(self):
prop = MinMaxBounds(accept_datetime=True)
# Valid values
self.assertTrue(prop.is_valid((datetime.date(2012, 10, 1), datetime.date(2012, 12, 2))))
# Invalid values
self.assertFalse(prop.is_valid((datetime.date(2012, 10, 1), 22)))
def test_HasProps_equals():
class Foo(HasProps):
x = Int(12)
y = String("hello")
z = List(Int, [1,2,3])
class FooUnrelated(HasProps):
x = Int(12)
y = String("hello")
z = List(Int, [1,2,3])
v = Foo().equals(Foo())
assert v is True
v = Foo(x=1).equals(Foo(x=1))
assert v is True
v = Foo(x=1).equals(Foo(x=2))
assert v is False
v = Foo(x=1).equals(1)
assert v is False
v = Foo().equals(FooUnrelated())
assert v is False
def test_HasProps_clone():
p1 = Plot(plot_width=1000)
c1 = p1.properties_with_values(include_defaults=False)
p2 = p1._clone()
c2 = p2.properties_with_values(include_defaults=False)
assert c1 == c2
def test_HasProps_pretty():
class Foo1(HasProps):
a = Int(12)
b = String("hello")
assert Foo1().pretty() == "bokeh.core.tests.test_properties.Foo1(a=12, b='hello')"
class Foo2(HasProps):
a = Int(12)
b = String("hello")
c = List(Int, [1, 2, 3])
assert Foo2().pretty() == "bokeh.core.tests.test_properties.Foo2(a=12, b='hello', c=[1, 2, 3])"
class Foo3(HasProps):
a = Int(12)
b = String("hello")
c = List(Int, [1, 2, 3])
d = Float(None)
assert Foo3().pretty() == "bokeh.core.tests.test_properties.Foo3(a=12, b='hello', c=[1, 2, 3], d=None)"
class Foo4(HasProps):
a = Int(12)
b = String("hello")
c = List(Int, [1, 2, 3])
d = Float(None)
e = Instance(Foo1, lambda: Foo1())
assert Foo4().pretty() == """\
bokeh.core.tests.test_properties.Foo4(
a=12,
b='hello',
c=[1, 2, 3],
d=None,
e=bokeh.core.tests.test_properties.Foo1(a=12, b='hello'))"""
class Foo5(HasProps):
foo6 = Any # can't use Instance(".core.tests.test_properties.Foo6")
class Foo6(HasProps):
foo5 = Instance(Foo5)
f5 = Foo5()
f6 = Foo6(foo5=f5)
f5.foo6 = f6
assert f5.pretty() == """\
bokeh.core.tests.test_properties.Foo5(
foo6=bokeh.core.tests.test_properties.Foo6(
foo5=bokeh.core.tests.test_properties.Foo5(...)))"""
def test_field_function():
assert field("foo") == dict(field="foo")
# TODO (bev) would like this to work I think
#assert field("foo", transform="junk") == dict(field="foo", transform="junk")
def test_value_function():
assert value("foo") == dict(value="foo")
# TODO (bev) would like this to work I think
#assert value("foo", transform="junk") == dict(value="foo", transform="junk")
def test_strict_dataspec_key_values():
for typ in (NumberSpec, StringSpec, FontSizeSpec, ColorSpec, DataDistanceSpec, ScreenDistanceSpec):
class Foo(HasProps):
x = typ("x")
f = Foo()
with pytest.raises(ValueError):
f.x = dict(field="foo", units="junk")
def test_strict_unitspec_key_values():
class FooUnits(HasProps):
x = DistanceSpec("x")
f = FooUnits()
f.x = dict(field="foo", units="screen")
with pytest.raises(ValueError):
f.x = dict(field="foo", units="junk", foo="crap")
class FooUnits(HasProps):
x = AngleSpec("x")
f = FooUnits()
f.x = dict(field="foo", units="deg")
with pytest.raises(ValueError):
f.x = dict(field="foo", units="junk", foo="crap")
|
bsd-3-clause
|
prheenan/Research
|
Perkins/AnalysisUtil/Gels/ImageJUtil.py
|
1
|
6779
|
# force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
# make the shared utilities importable before importing them
sys.path.append("../../../../")
import GeneralUtil.python.GenUtilities as pGenUtil
from collections import OrderedDict
import re
class LaneObject(object):
"""
Class to keep track of (generalized) lanes
"""
def __init__(self,*lanes):
self.Lanes = np.array(list(lanes))
self.TotalIntensity = np.sum(self.Lanes)
def Normalized(self,LaneIndex):
"""
Returns the normalized pixel intensity in laneindex "LaneIndex"
"""
        assert LaneIndex < len(self.Lanes) , "Asked for a lane we don't have"
return self.Lanes[LaneIndex]/self.TotalIntensity
def NormalizedIntensities(self):
"""
Returns a list of all the normalized intensities
"""
return [self.Normalized(i) for i in range(len(self.Lanes))]
def __str__(self):
return "\n".join("Lane{:03d}={:.2f}".format(i,self.Normalized(i))
for i in range(self.Lanes.size))
class OverhangLane(LaneObject):
"""
Class to keep track of the bands in an overhang lane
"""
    def __init__(self,Linear,Circular=0,Concat=0,*args):
        # any additional bands are folded into the concatemer intensity
        if len(args) > 0:
            Concat += sum(args)
        # pass only the three consolidated bands, so TotalIntensity does not
        # double-count the extra concatemer bands
        super(OverhangLane,self).__init__(Linear,Circular,Concat)
        self.LinearBand = Linear
        self.CircularBand = Circular
        self.Concatemers = Concat
def _Norm(self,x):
return x/self.TotalIntensity
@property
def LinearRelative(self):
"""
Gets the relative linear intensity
"""
return self._Norm(self.LinearBand)
@property
def CircularRelative(self):
"""
Gets the relative circular intensity
"""
return self._Norm(self.CircularBand)
@property
def ConcatemerRelative(self):
"""
Gets the relative concatemer intensity
"""
return self._Norm(self.Concatemers)
def __str__(self):
return "Lin:{:3.2f},Circ:{:3.2f},Concat:{:3.2f}".\
format(self.LinearRelative,
self.CircularRelative,
self.ConcatemerRelative)
def __repr__(self):
return str(self)
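# Illustrative usage of the lane classes above (band intensities are made-up
# numbers, shown only to document the API):
#   lane = OverhangLane(Linear=120.0, Circular=45.0, Concat=10.0)
#   lane.LinearRelative     # ~0.69 (120/175)
#   lane.CircularRelative   # ~0.26 (45/175)
#   print(lane)             # "Lin:0.69,Circ:0.26,Concat:0.06"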
class LaneTrialObject(object):
"""
Idea of this class is to represent multiple lanes, each of which
    has the same contents loaded
"""
def __init__(self,lanes):
self.lanes = lanes
def MeanLane(self,idx):
"""
Get the mean value of all the lane 'copies', for a given lane
"""
return np.mean([l.Normalized(idx) for l in self.lanes])
def StdevLane(self,idx):
"""
Get standard deviation of all the lane 'copies', for a given lane
"""
return np.std([l.Normalized(idx) for l in self.lanes])
def MeanStdev(self,idx):
return self.MeanLane(idx),self.StdevLane(idx)
def __str__(self):
return "\n".join("{:s}".format(l) for l in self.lanes)
class OverhangTrialObject(LaneTrialObject):
def __init__(self,lanes):
super(OverhangTrialObject,self).__init__(lanes)
@property
def LinearMeanStdev(self):
"""
Returns a tuple of the mean and standard deviation for the linear lane
"""
return self.MeanStdev(0)
@property
def CircularMeanStdev(self):
"""
Returns a tuple of the mean and standard deviation for the circular lane
"""
return self.MeanStdev(1)
@property
def ConcatemerMeanStdev(self):
"""
Returns a tuple of mean and standard deviation for concatemer lane(s)
"""
return self.MeanStdev(2)
@property
def LinearRelative(self):
"""
Returns the mean for the linear lane
"""
return self.LinearMeanStdev[0]
@property
def CircularRelative(self):
"""
Returns the mean for the circular lane
"""
return self.CircularMeanStdev[0]
@property
def ConcatemerRelative(self):
"""
Returns the mean for the concatemer lane
"""
return self.ConcatemerMeanStdev[0]
def GetErrors(self):
"""
Returns a list of the stdevs for linear, circular, and concatemer
"""
props = [self.LinearMeanStdev,self.CircularMeanStdev,
self.ConcatemerMeanStdev]
return [p[1] for p in props]
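# Illustrative usage (two replicate lanes with made-up band intensities):
#   trial = OverhangTrialObject([OverhangLane(100.0, 50.0, 10.0),
#                                OverhangLane(110.0, 40.0, 10.0)])
#   trial.LinearMeanStdev   # (mean, stdev) of the normalized linear band
#   trial.GetErrors()       # [linear stdev, circular stdev, concatemer stdev]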
def GetImageJData(DataDirBase,ext=".xls"):
"""
Given a base data directory, finds all files with ext in each subdirectory
Args:
DataDirBase: base data directory. Each subdirectory has files with
extension 'ext'
ext: file extension
Returns:
ordered dictionary of <subdir:fullpaths>
"""
Voltages = OrderedDict()
for f in sorted(os.listdir(DataDirBase)):
PossibleSubDir = DataDirBase + f +"/"
if (os.path.isdir(PossibleSubDir)):
            Files = pGenUtil.getAllFiles(PossibleSubDir,ext)
            Voltages[f] = Files
return Voltages
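# Illustrative usage (directory layout is hypothetical): for a base directory
# containing sub-directories "10V/" and "5V/" of ImageJ .xls exports,
#   GetImageJData("./Data/")
# would return OrderedDict([('10V', [...paths...]), ('5V', [...paths...])]).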
def ReadFileToLaneObj(File):
"""
Given a file, get it as a lane object.
Args:
File: see ReadFileToOverhangObj
Returns:
LaneObject
"""
return LaneObject(*list(GetImageJMeasurements(File)))
def ReadFileToOverhangObj(File):
"""
get the file's data, as an OverhangLane object
Args:
File: full path to the file to read. Must be formatted with the second
        column being the intensities, and the rows being <concat if any,
circular if any, linear>
Returns:
OverhangLane object
"""
Measurements = GetImageJMeasurements(File).tolist()
if type(Measurements) is float:
Measurements = [Measurements]
    # reverse so it goes linear,circular,concat
return OverhangLane(*(Measurements[::-1]))
def GetImageJMeasurements(File):
"""
Returns the in-order values of the intensity column in the ImageJ xls file
Args:
File: to read from
Returns:
intensity column
"""
return np.loadtxt(File,skiprows=1,usecols=(1,))
def GetLaneTrialsMatchingName(DataDirBase,FilePattern,ext=".xls"):
    """
    Returns an OverhangTrialObject built from every file under DataDirBase
    whose full path matches the regular expression FilePattern
    """
    FileNames = pGenUtil.getAllFiles(DataDirBase,ext)
    Copies = []
    for f in FileNames:
        if (re.match(FilePattern, f) is not None):
            Copies.append(f)
    assert len(Copies) > 0 , "Couldn't find any files to load"
# now get the lanes from the files
Lanes = [ReadFileToOverhangObj(f) for f in Copies]
# now convert the lanes to a single object, with the means and standard
# deviations
return OverhangTrialObject(Lanes)
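# Illustrative end-to-end usage (the path and pattern below are hypothetical):
#   trial = GetLaneTrialsMatchingName("./Gels/2016_10_01/",
#                                     r".+overhang_lane.+\.xls")
#   print(trial.LinearRelative, trial.CircularRelative, trial.ConcatemerRelative)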
|
gpl-3.0
|
caganze/splat
|
splat/euclid.py
|
2
|
5775
|
from __future__ import print_function, division
"""
.. note::
These are functions related to the EUCLID analysis based on SPLAT tools
"""
#import astropy
#import copy
#from datetime import datetime
#import os
#import re
#import requests
#from splat import SPLAT_PATH, SPLAT_URL
#from scipy import stats
#from astropy.io import ascii, fits # for reading in spreadsheet
#from astropy.table import Table, join # for reading in table files
#from astropy.coordinates import SkyCoord
# imports: internal
import copy
import os
import time
# imports: external
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy
import pandas
from astropy import units as u # standard units
from scipy.interpolate import interp1d
from scipy.integrate import trapz # for numerical integration
# imports: splat
from .core import getSpectrum, classifyByIndex
import splat.empirical as spem
import splat.evolve as spev
import splat.plot as splot
# some euclid parameters
EUCLID_WAVERANGE = [1.25,1.85]
EUCLID_RESOLUTION = 250
EUCLID_NOISE = 3e-15*u.erg/u.s/u.cm**2/u.micron
EUCLID_SAMPLING = 0.0013 # micron/pixel
EUCLID_SLITWIDTH = numpy.mean(EUCLID_WAVERANGE)/EUCLID_RESOLUTION/EUCLID_SAMPLING # pixels per resolution element (~4.8)
# program to convert spex spectra in to euclid spectra
def spexToEuclid(sp):
'''
:Purpose: Convert a SpeX file into EUCLID form, using the resolution and wavelength coverage
defined from the Euclid Red Book (`Laurijs et al. 2011 <http://sci.esa.int/euclid/48983-euclid-definition-study-report-esa-sre-2011-12/>`_). This function changes the input Spectrum
objects, which can be restored by the Spectrum.reset() method.
:param sp: Spectrum class object, which should contain wave, flux and noise array elements
:Example:
>>> import splat
>>> sp = splat.getSpectrum(lucky=True)[0] # grab a random file
>>> splat.spexToEuclid(sp)
>>> min(sp.wave), max(sp.wave)
(<Quantity 1.25 micron>, <Quantity 1.8493000000000364 micron>)
>>> sp.history
[``'Spectrum successfully loaded``',
``'Converted to EUCLID format``']
>>> sp.reset()
>>> min(sp.wave), max(sp.wave)
(<Quantity 0.6454827785491943 micron>, <Quantity 2.555659770965576 micron>)
'''
sp.resolution = EUCLID_RESOLUTION
sp.slitpixelwidth = EUCLID_SLITWIDTH
f = interp1d(sp.wave,sp.flux)
n = interp1d(sp.wave,sp.noise)
sp.wave = numpy.arange(EUCLID_WAVERANGE[0],EUCLID_WAVERANGE[1],EUCLID_SAMPLING)*sp.wunit
sp.flux = f(sp.wave/sp.wunit)*sp.funit
sp.noise = n(sp.wave/sp.wunit)*sp.funit
# update other spectrum elements
sp.snr = sp.computeSN()
sp.flam = sp.flux
sp.nu = sp.wave.to('Hz',equivalencies=u.spectral())
sp.fnu = sp.flux.to('Jy',equivalencies=u.spectral_density(sp.wave))
sp.variance = sp.noise**2
sp.dof = numpy.round(len(sp.wave)/sp.slitpixelwidth)
sp.history.append('Converted to EUCLID format')
# adds noise to spectrum
def addEuclidNoise(sp):
'''
:Purpose: Adds Gaussian noise to a EUCLID-formatted spectrum assuming a constant noise
    model of 3e-15 erg/s/cm2/micron (as extrapolated from the Euclid Red Book;
    `Laurijs et al. 2011 <http://sci.esa.int/euclid/48983-euclid-definition-study-report-esa-sre-2011-12/>`_).
Note that noise is added to both flux and (in quadrature) variance. This function creates a
new Spectrum object so as not to corrupt the original data.
:param sp: Spectrum class object, which should contain wave, flux and noise array elements
:Output: Spectrum object with Euclid noise added in
:Example:
>>> import splat
>>> sp = splat.getSpectrum(lucky=True)[0] # grab a random file
>>> splat.spexToEuclid(sp)
>>> sp.normalize()
>>> sp.scale(1.e-14)
>>> sp.computeSN()
115.96374031163553
    >>> sp_noisy = splat.addEuclidNoise(sp)
>>> sp_noisy.computeSN()
3.0847209519763172
'''
sp2 = copy.deepcopy(sp)
bnoise = numpy.zeros(len(sp2.noise))+EUCLID_NOISE
    bnoise = bnoise.to(sp2.funit,equivalencies=u.spectral())
anoise = numpy.random.normal(0,EUCLID_NOISE/sp2.funit,len(bnoise))*sp2.funit
sp2.flux = sp2.flux+anoise
sp2.variance = sp2.variance+bnoise**2
sp2.noise = sp2.variance**0.5
sp2.snr = sp2.computeSN()
sp2.history.append('Added spectral noise based on Euclid sensitivity')
return sp2
if __name__ == '__main__':
'''
    Test function for the splat_euclid functions: takes a J0559-1404 spectrum
    and plots it at 3 apparent magnitudes with the corresponding distances
'''
ofold = '/Users/adam/projects/splat/euclid/'
sp = getSpectrum(shortname='J0559-1404')[0]
spt = 'T4.5'
filter = 'MKO H'
    m1 = spem.typeToMag(spt,filter)[0]
    m2 = 21
    d2 = spem.estimateDistance(sp,spt=spt,mag=m2, absmag=m1)[0]
    m3 = 19
    d3 = spem.estimateDistance(sp,spt=spt,mag=m3, absmag=m1)[0]
spexToEuclid(sp)
sp.normalize()
sp.fluxCalibrate(filter,m2)
print(sp.snr)
sp2 = addEuclidNoise(sp)
print(sp2.snr)
sp.fluxCalibrate(filter,m3)
sp3 = addEuclidNoise(sp)
print(sp3.snr)
sp.normalize()
sp2.normalize()
sp3.normalize()
cls1 = classifyByIndex(sp)
cls2 = classifyByIndex(sp2)
cls3 = classifyByIndex(sp3)
splot.plotSpectrum(sp,sp3,sp2,colors=['k','b','r'],stack=0.5,xrange=EUCLID_WAVERANGE,\
yrange=[-0.3,2.2],output=ofold+'spectral_degradation.eps',legend=[\
'J0559-1404 MH = {:.1f}, SpT = {}+/-{:.1f}'.format(m1,cls1[0],cls1[1]),\
'H = {:.1f}, d = {:.1f} pc, SpT = {}+/-{:.1f}'.format(m3,d3,cls3[0],cls3[1]),\
'H = {:.1f}, d = {:.1f} pc, SpT = {}+/-{:.1f}'.format(m2,d2,cls2[0],cls2[1])])
    print('For H = {}, SpT = {}+/-{}'.format(m1,cls1[0],cls1[1]))
    print('For H = {}, SpT = {}+/-{}'.format(m3,cls3[0],cls3[1]))
    print('For H = {}, SpT = {}+/-{}'.format(m2,cls2[0],cls2[1]))
|
mit
|
pgora/TensorTraffic
|
ErrorDistribution/tt_plots.py
|
1
|
2297
|
from train import *
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization
from keras.optimizers import Adam
import matplotlib.pyplot as plt
f = ContextFilter()
logging.getLogger('tensorflow').setLevel(logging.INFO)
logging.getLogger('tensorflow').addFilter(f)
all_data = tf.contrib.learn.datasets.base.load_csv_without_header(
filename='micro_data.csv',
target_dtype=np.float32,
features_dtype=np.float32)
X = all_data.data
y = all_data.target
X = np.delete(X, [15, 16], 1)
X = (X - np.mean(X, axis=0, keepdims=True)) / np.std(X, axis=0, keepdims=True)
y_std = np.std(y)
y_mean = np.mean(y)
y = (y - y_mean) / y_std
def build_model(no_layers=2, no_units=100, dropout=0.6):
model = Sequential()
model.add(Dense(no_units, input_dim=X.shape[1], activation='relu'))
model.add(Dropout(dropout))
model.add(BatchNormalization())
for i in range(no_layers - 1):
model.add(Dense(no_units, activation='relu'))
model.add(Dropout(dropout))
model.add(BatchNormalization())
model.add(Dense(1))
model.compile(loss='mae', metrics=[], optimizer=Adam(lr=0.001))
return model
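# Illustrative: the same builder with a deeper, lower-dropout configuration
# (the values below are arbitrary and only document the signature):
#   deep_model = build_model(no_layers=3, no_units=200, dropout=0.3)
#   deep_model.summary()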
model = build_model(dropout=0.4)
def train_test_split(X, y, train_ratio):
h = np.random.permutation(X.shape[0])
n_train = int(train_ratio * X.shape[0])
X_train = X[h[:n_train], :]
X_test = X[h[n_train:], :]
y_train = y[h[:n_train]]
y_test = y[h[n_train:]]
return X_train, X_test, y_train, y_test
X_train, X_test, y_train, y_test = train_test_split(X, y, 0.8)
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=1000, batch_size=8192, verbose=0)
#matplotlib inline
plt.plot(history.history['loss'][200:], c='red')
plt.plot(history.history['val_loss'][200:], c='blue')
def get_errors(actual, predicted):
actual = actual.flatten()
predicted = predicted.flatten()
actual = actual * y_std + y_mean
predicted = predicted * y_std + y_mean
error = np.abs(actual - predicted)
rel_error = np.abs(actual - predicted) / actual
return np.max(error), np.mean(error), np.max(rel_error), np.mean(rel_error)
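# get_errors returns (max abs error, mean abs error, max relative error,
# mean relative error) in the original units of y, i.e. after undoing the
# normalization with the module-level y_mean and y_std defined above.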
predicted = model.predict(X_test)
get_errors(y_test, predicted)
predicted = model.predict(X_train)
get_errors(y_train, predicted)
|
mit
|
CCS-Lab/hBayesDM
|
Python/hbayesdm/diagnostics.py
|
1
|
5221
|
from typing import List, Dict, Sequence, Union
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import arviz as az
from hbayesdm.base import TaskModel
__all__ = ['rhat', 'print_fit', 'hdi', 'plot_hdi', 'extract_ic']
def rhat(model_data: TaskModel,
less: float = None) -> Dict[str, Union[List, bool]]:
"""Function for extracting Rhat values from hbayesdm output.
Convenience function for extracting Rhat values from hbayesdm output.
Also possible to check if all Rhat values are less than a specified value.
Parameters
----------
model_data
Output instance of running an hbayesdm model function.
less
[Optional] Upper-bound value to compare extracted Rhat values to.
Returns
-------
Dict
Keys are names of the parameters; values are their Rhat values.
Or if `less` was specified, the dictionary values will hold `True` if
all Rhat values (of that parameter) are less than or equal to `less`.
"""
rhat_data = az.rhat(model_data.fit)
if less is None:
return {v.name: v.values.tolist()
for v in rhat_data.data_vars.values()}
else:
return {v.name: v.values.item()
for v in (rhat_data.max() <= less).data_vars.values()}
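# Illustrative usage (assumes `output` is the object returned by running any
# hbayesdm model function; parameter names are model-dependent):
#   rhat(output)              # {'mu_A': [...], 'mu_tau': [...], ...}
#   rhat(output, less=1.1)    # {'mu_A': True, 'mu_tau': True, ...}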
def print_fit(*args: TaskModel, ic: str = 'looic') -> pd.DataFrame:
"""Print model-fits (mean LOOIC or WAIC values) of hbayesdm models.
Parameters
----------
args
Output instances of running hbayesdm model functions.
ic
Information criterion (defaults to 'looic').
Returns
-------
pd.DataFrame
Model-fit information for each hbayesdm output given as an argument.
"""
ic_options = ('looic', 'waic')
if ic not in ic_options:
raise RuntimeError(
'Information Criterion (ic) must be one of ' + repr(ic_options))
dataset_dict = {
model_data.model:
az.from_pystan(model_data.fit, log_likelihood='log_lik')
for model_data in args
}
ic = 'loo' if ic == 'looic' else 'waic'
return az.compare(dataset_dict=dataset_dict, ic=ic)
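# Hedged usage sketch (editor addition). `output1` and `output2` stand for two
# fitted hbayesdm models of the same task; both names are hypothetical.
def _example_print_fit_usage(output1: TaskModel, output2: TaskModel) -> pd.DataFrame:
    # Returns an ArviZ comparison table ranked by LOOIC (or WAIC if requested).
    return print_fit(output1, output2, ic='looic')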
def hdi(x: np.ndarray, credible_interval: float = 0.94) -> np.ndarray:
"""Calculate highest density interval (HDI).
This function acts as an alias for the `arviz.hpd` function.
Parameters
----------
x
Array containing MCMC samples.
credible_interval
Credible interval to compute. Defaults to 0.94.
Returns
-------
np.ndarray
Array containing the lower and upper value of the computed interval.
"""
return az.hpd(x, credible_interval=credible_interval)
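# Hedged usage sketch (editor addition); the synthetic normal draws below are a
# stand-in for real MCMC samples.
def _example_hdi_usage() -> np.ndarray:
    samples = np.random.normal(loc=0.0, scale=1.0, size=4000)
    return hdi(samples, credible_interval=0.94)  # array([lower, upper])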
def plot_hdi(x: np.ndarray,
credible_interval: float = 0.94,
title: str = None,
xlabel: str = 'Value',
ylabel: str = 'Density',
point_estimate: str = None,
bins: Union[int, Sequence, str] = 'auto',
round_to: int = 2,
**kwargs):
"""Plot highest density interval (HDI).
This function forwards its input to the `arviz.plot_posterior` function.
Parameters
----------
x
Array containing MCMC samples.
credible_interval
Credible interval to plot. Defaults to 0.94.
title
String to set as title of plot.
xlabel
String to set as the x-axis label.
ylabel
String to set as the y-axis label.
point_estimate
Defaults to None. Possible options are 'mean', 'median', 'mode'.
bins
Controls the number of bins. Defaults to 'auto'.
Accepts the same values (or keywords) as plt.hist() does.
round_to
Controls formatting for floating point numbers. Defaults to 2.
**kwargs
Passed as-is to plt.hist().
"""
kwargs.setdefault('color', 'black')
ax = az.plot_posterior(x,
kind='hist',
credible_interval=credible_interval,
point_estimate=point_estimate,
bins=bins,
round_to=round_to,
**kwargs).item()
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.show()
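# Hedged usage sketch (editor addition); the samples are synthetic stand-ins
# for posterior draws and the title is arbitrary.
def _example_plot_hdi_usage() -> None:
    samples = np.random.normal(loc=0.0, scale=1.0, size=4000)
    plot_hdi(samples, credible_interval=0.94, title='Example posterior',
             point_estimate='mean')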
def extract_ic(model_data: TaskModel,
ic: str = 'both',
ncore: int = 2) \
-> Dict:
"""Extract model comparison estimates.
Parameters
----------
model_data
hBayesDM output object from running a model function.
ic
Information criterion. 'looic', 'waic', or 'both'. Defaults to 'both'.
ncore
Number of cores to use when computing LOOIC. Defaults to 2.
Returns
-------
Dict
Leave-One-Out and/or Watanabe-Akaike information criterion estimates.
"""
ic_options = ('looic', 'waic', 'both')
if ic not in ic_options:
raise RuntimeError(
'Information Criterion (ic) must be one of ' + repr(ic_options))
dat = az.from_pystan(model_data.fit, log_likelihood='log_lik')
ret = {}
if ic in ['looic', 'both']:
ret['looic'] = az.loo(dat)['loo']
if ic in ['waic', 'both']:
ret['waic'] = az.waic(dat)['waic']
return ret
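# Hedged usage sketch (editor addition); `output` stands for a fitted hbayesdm
# model and is hypothetical.
def _example_extract_ic_usage(output: TaskModel) -> Dict:
    # e.g. {'looic': ..., 'waic': ...}
    return extract_ic(output, ic='both')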
|
gpl-3.0
|
cython-testbed/pandas
|
pandas/tests/frame/test_indexing.py
|
3
|
130466
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from warnings import catch_warnings, simplefilter
from datetime import datetime, date, timedelta, time
from pandas.compat import map, zip, range, lrange, lzip, long
from pandas import compat
from numpy import nan
from numpy.random import randn
import pytest
import numpy as np
import pandas.core.common as com
from pandas import (DataFrame, Index, Series, notna, isna,
MultiIndex, DatetimeIndex, Timestamp,
date_range, Categorical)
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas._libs.tslib import iNaT
from pandas.tseries.offsets import BDay
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer,
is_scalar)
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
from pandas.core.indexing import IndexingError
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameIndexing(TestData):
def test_getitem(self):
# Slicing
sl = self.frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in compat.iteritems(sl):
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in compat.iteritems(self.frame._series):
assert self.frame[key] is not None
assert 'random' not in self.frame
with tm.assert_raises_regex(KeyError, 'random'):
self.frame['random']
df = self.frame.copy()
df['$10'] = randn(len(df))
ad = randn(len(df))
df['@awesome_domain'] = ad
with pytest.raises(KeyError):
df.__getitem__('df["$10"]')
res = df['@awesome_domain']
tm.assert_numpy_array_equal(ad, res.values)
def test_getitem_dupe_cols(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
with pytest.raises(KeyError):
df[['baf']]
def test_get(self):
b = self.frame.get('B')
assert_series_equal(b, self.frame['B'])
assert self.frame.get('foo') is None
assert_series_equal(self.frame.get('foo', self.frame['B']),
self.frame['B'])
@pytest.mark.parametrize("df", [
DataFrame(),
DataFrame(columns=list("AB")),
DataFrame(columns=list("AB"), index=range(3))
])
def test_get_none(self, df):
# see gh-5652
assert df.get(None) is None
def test_loc_iterable(self):
idx = iter(['A', 'B', 'C'])
result = self.frame.loc[:, idx]
expected = self.frame.loc[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"idx_type",
[list, iter, Index, set,
lambda l: dict(zip(l, range(len(l)))),
lambda l: dict(zip(l, range(len(l)))).keys()],
ids=["list", "iter", "Index", "set", "dict", "dict_keys"])
@pytest.mark.parametrize("levels", [1, 2])
def test_getitem_listlike(self, idx_type, levels):
# GH 21294
if levels == 1:
frame, missing = self.frame, 'food'
else:
# MultiIndex columns
frame = DataFrame(randn(8, 3),
columns=Index([('foo', 'bar'), ('baz', 'qux'),
('peek', 'aboo')],
name=('sth', 'sth2')))
missing = ('good', 'food')
keys = [frame.columns[1], frame.columns[0]]
idx = idx_type(keys)
idx_check = list(idx_type(keys))
result = frame[idx]
expected = frame.loc[:, idx_check]
expected.columns.names = frame.columns.names
assert_frame_equal(result, expected)
idx = idx_type(keys + [missing])
with tm.assert_raises_regex(KeyError, 'not in index'):
frame[idx]
def test_getitem_callable(self):
# GH 12533
result = self.frame[lambda x: 'A']
tm.assert_series_equal(result, self.frame.loc[:, 'A'])
result = self.frame[lambda x: ['A', 'B']]
tm.assert_frame_equal(result, self.frame.loc[:, ['A', 'B']])
df = self.frame[:3]
result = df[lambda x: [True, False, True]]
tm.assert_frame_equal(result, self.frame.iloc[[0, 2], :])
def test_setitem_list(self):
self.frame['E'] = 'foo'
data = self.frame[['A', 'B']]
self.frame[['B', 'A']] = data
assert_series_equal(self.frame['B'], data['A'], check_names=False)
assert_series_equal(self.frame['A'], data['B'], check_names=False)
with tm.assert_raises_regex(ValueError,
'Columns must be same length as key'):
data[['A']] = self.frame[['A', 'B']]
with tm.assert_raises_regex(ValueError, 'Length of values '
'does not match '
'length of index'):
data['A'] = range(len(data.index) - 1)
df = DataFrame(0, lrange(3), ['tt1', 'tt2'], dtype=np.int_)
df.loc[1, ['tt1', 'tt2']] = [1, 2]
result = df.loc[df.index[1], ['tt1', 'tt2']]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
assert_series_equal(result, expected)
df['tt1'] = df['tt2'] = '0'
df.loc[df.index[1], ['tt1', 'tt2']] = ['1', '2']
result = df.loc[df.index[1], ['tt1', 'tt2']]
expected = Series(['1', '2'], df.columns, name=1)
assert_series_equal(result, expected)
def test_setitem_list_not_dataframe(self):
data = np.random.randn(len(self.frame), 2)
self.frame[['A', 'B']] = data
assert_almost_equal(self.frame[['A', 'B']].values, data)
def test_setitem_list_of_tuples(self):
tuples = lzip(self.frame['A'], self.frame['B'])
self.frame['tuples'] = tuples
result = self.frame['tuples']
expected = Series(tuples, index=self.frame.index, name='tuples')
assert_series_equal(result, expected)
def test_setitem_multi_index(self):
# GH7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ['jim', 'joe', 'jolie'], ['first', 'last'], \
['left', 'center', 'right']
cols = MultiIndex.from_product(it)
index = pd.date_range('20141006', periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = pd.DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df['jim'] = df['jolie'].loc[i, ::-1]
assert_frame_equal(df['jim'], df['jolie'])
np.random.shuffle(j)
df[('joe', 'first')] = df[('jolie', 'last')].loc[i, j]
assert_frame_equal(df[('joe', 'first')], df[('jolie', 'last')])
np.random.shuffle(j)
df[('joe', 'last')] = df[('jolie', 'first')].loc[i, j]
assert_frame_equal(df[('joe', 'last')], df[('jolie', 'first')])
def test_setitem_callable(self):
# GH 12533
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [5, 6, 7, 8]})
df[lambda x: 'A'] = [11, 12, 13, 14]
exp = pd.DataFrame({'A': [11, 12, 13, 14], 'B': [5, 6, 7, 8]})
tm.assert_frame_equal(df, exp)
def test_setitem_other_callable(self):
# GH 13299
def inc(x):
return x + 1
df = pd.DataFrame([[-1, 1], [1, -1]])
df[df > 0] = inc
expected = pd.DataFrame([[-1, inc], [inc, -1]])
tm.assert_frame_equal(df, expected)
def test_getitem_boolean(self):
# boolean indexing
d = self.tsframe.index[10]
indexer = self.tsframe.index > d
indexer_obj = indexer.astype(object)
subindex = self.tsframe.index[indexer]
subframe = self.tsframe[indexer]
tm.assert_index_equal(subindex, subframe.index)
with tm.assert_raises_regex(ValueError, 'Item wrong length'):
self.tsframe[indexer[:-1]]
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
with tm.assert_raises_regex(ValueError, 'boolean values only'):
self.tsframe[self.tsframe]
# test that Series work
indexer_obj = Series(indexer_obj, self.tsframe.index)
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [self.tsframe, self.mixed_frame,
self.mixed_float, self.mixed_int]:
if compat.PY3 and df is self.mixed_frame:
continue
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame({c: np.where(data[c] > 0, data[c], np.nan)
for c in data.columns},
index=data.index, columns=data.columns)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self):
# don't upcast if we don't need to
df = self.tsframe.copy()
df['E'] = 1
df['E'] = df['E'].astype('int32')
df['E1'] = df['E'].copy()
df['F'] = 1
df['F'] = df['F'].astype('int64')
df['F1'] = df['F'].copy()
casted = df[df > 0]
result = casted.get_dtype_counts()
expected = Series({'float64': 4, 'int32': 2, 'int64': 2})
assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ['E1', 'F1']] = 0
casted = df[df > 0]
result = casted.get_dtype_counts()
expected = Series({'float64': 6, 'int32': 1, 'int64': 1})
assert_series_equal(result, expected)
# where dtype conversions
# GH 3733
df = DataFrame(data=np.random.randn(100, 50))
df = df.where(df > 0) # create nans
bools = df > 0
mask = isna(df)
expected = bools.astype(float).mask(mask)
result = bools.mask(mask)
assert_frame_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = randn(5, 5)
df = DataFrame(arr.copy(), columns=['A', 'B', 'C', 'D', 'E'])
df[df < 0] += 1
arr[arr < 0] += 1
assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=['A'],
index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(np.random.randn(4, 3),
index=[1, 10, 'C', 'E'], columns=[1, 2, 3])
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
assert_frame_equal(result, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[[1, 10]]
expected = df.ix[Index([1, 10], dtype=object)]
assert_frame_equal(result, expected)
# 11320
df = pd.DataFrame({"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30]},
columns=['rna', -1000, 0, 1000])
result = df[[1000]]
expected = df.iloc[:, [3]]
assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
assert_frame_equal(result, expected)
def test_getitem_setitem_ix_negative_integers(self):
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = self.frame.ix[:, -1]
assert_series_equal(result, self.frame['D'])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = self.frame.ix[:, [-1]]
assert_frame_equal(result, self.frame[['D']])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = self.frame.ix[:, [-1, -2]]
assert_frame_equal(result, self.frame[['D', 'C']])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
self.frame.ix[:, [-1]] = 0
assert (self.frame['D'] == 0).all()
df = DataFrame(np.random.randn(8, 4))
# ix does label-based indexing when having an integer index
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
with pytest.raises(KeyError):
df.ix[[-1]]
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
with pytest.raises(KeyError):
df.ix[:, [-1]]
# #1942
a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
a.ix[-1] = a.ix[-2]
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_series_equal(a.ix[-1], a.ix[-2], check_names=False)
assert a.ix[-1].name == 'T'
assert a.ix[-2].name == 'S'
def test_getattr(self):
assert_series_equal(self.frame.A, self.frame['A'])
pytest.raises(AttributeError, getattr, self.frame,
'NONEXISTENT_NAME')
def test_setattr_column(self):
df = DataFrame({'foobar': 1}, index=lrange(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self):
# not sure what else to do here
series = self.frame['A'][::2]
self.frame['col5'] = series
assert 'col5' in self.frame
assert len(series) == 15
assert len(self.frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=self.frame.index, name='col5')
tm.assert_series_equal(self.frame['col5'], exp)
series = self.frame['A']
self.frame['col6'] = series
tm.assert_series_equal(series, self.frame['col6'], check_names=False)
with pytest.raises(KeyError):
self.frame[randn(len(self.frame) + 1)] = 1
# set ndarray
arr = randn(len(self.frame))
self.frame['col9'] = arr
assert (self.frame['col9'] == arr).all()
self.frame['col7'] = 5
assert((self.frame['col7'] == 5).all())
self.frame['col0'] = 3.14
assert((self.frame['col0'] == 3.14).all())
self.frame['col8'] = 'foo'
assert((self.frame['col8'] == 'foo').all())
# this is partially a view (e.g. some blocks are views),
# so setting it should raise/warn
smaller = self.frame[:2]
def f():
smaller['col10'] = ['1', '2']
pytest.raises(com.SettingWithCopyError, f)
assert smaller['col10'].dtype == np.object_
assert (smaller['col10'] == ['1', '2']).all()
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype):
arr = randn(len(self.frame))
self.frame[dtype] = np.array(arr, dtype=dtype)
assert self.frame[dtype].dtype.name == dtype
def test_setitem_tuple(self):
self.frame['A', 'B'] = self.frame['A']
assert_series_equal(self.frame['A', 'B'], self.frame[
'A'], check_names=False)
def test_setitem_always_copy(self):
s = self.frame['A'].copy()
self.frame['E'] = s
self.frame['E'][5:10] = nan
assert notna(s[5:10]).all()
def test_setitem_boolean(self):
df = self.frame.copy()
values = self.frame.values
df[df['A'] > 0] = 4
values[values[:, 0] > 0] = 4
assert_almost_equal(df.values, values)
# test that column reindexing works
series = df['A'] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with tm.assert_raises_regex(TypeError, msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = nan
expected.values[mask.values] = nan
assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"mask_type",
[lambda df: df > np.abs(df) / 2,
lambda df: (df > np.abs(df) / 2).values],
ids=['dataframe', 'array'])
def test_setitem_boolean_mask(self, mask_type):
# Test for issue #18582
df = self.frame.copy()
mask = mask_type(df)
# index with boolean mask
result = df.copy()
result[mask] = np.nan
expected = df.copy()
expected.values[np.array(mask)] = np.nan
assert_frame_equal(result, expected)
def test_setitem_cast(self):
self.frame['D'] = self.frame['D'].astype('i8')
assert self.frame['D'].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
self.frame['B'] = 0
assert self.frame['B'].dtype == np.int64
# cast if pass array of course
self.frame['B'] = np.arange(len(self.frame))
assert issubclass(self.frame['B'].dtype.type, np.integer)
self.frame['foo'] = 'bar'
self.frame['foo'] = 0
assert self.frame['foo'].dtype == np.int64
self.frame['foo'] = 'bar'
self.frame['foo'] = 2.5
assert self.frame['foo'].dtype == np.float64
self.frame['something'] = 0
assert self.frame['something'].dtype == np.int64
self.frame['something'] = 2
assert self.frame['something'].dtype == np.int64
self.frame['something'] = 2.5
assert self.frame['something'].dtype == np.float64
# GH 7704
# dtype conversion on setting
df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))
df['event'] = np.nan
df.loc[10, 'event'] = 'foo'
result = df.get_dtype_counts().sort_values()
expected = Series({'float64': 3, 'object': 1}).sort_values()
assert_series_equal(result, expected)
# Test that data type is preserved. #5782
df = DataFrame({'one': np.arange(6, dtype=np.int8)})
df.loc[1, 'one'] = 6
assert df.dtypes.one == np.dtype(np.int8)
df.one = np.int8(7)
assert df.dtypes.one == np.dtype(np.int8)
def test_setitem_boolean_column(self):
expected = self.frame.copy()
mask = self.frame['A'] > 0
self.frame.loc[mask, 'B'] = 0
expected.values[mask.values, 1] = 0
assert_frame_equal(self.frame, expected)
def test_frame_setitem_timestamp(self):
# GH#2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012', freq=BDay())
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works, mostly a smoke-test
assert np.isnan(data[ts]).all()
def test_setitem_corner(self):
# corner case
df = DataFrame({'B': [1., 2., 3.],
'C': ['a', 'b', 'c']},
index=np.arange(3))
del df['B']
df['B'] = [1., 2., 3.]
assert 'B' in df
assert len(df.columns) == 2
df['A'] = 'beginning'
df['E'] = 'foo'
df['D'] = 'bar'
df[datetime.now()] = 'date'
df[datetime.now()] = 5.
# what to do when empty frame with index
dm = DataFrame(index=self.frame.index)
dm['A'] = 'foo'
dm['B'] = 'bar'
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm['C'] = 1
assert dm['C'].dtype == np.int64
dm['E'] = 1.
assert dm['E'].dtype == np.float64
# set existing column
dm['A'] = 'bar'
assert 'bar' == dm['A'][0]
dm = DataFrame(index=np.arange(3))
dm['A'] = 1
dm['foo'] = 'bar'
del dm['foo']
dm['foo'] = 'bar'
assert dm['foo'].dtype == np.object_
dm['coercable'] = ['1', '2', '3']
assert dm['coercable'].dtype == np.object_
def test_setitem_corner2(self):
data = {"title": ['foobar', 'bar', 'foobar'] + ['foobar'] * 17,
"cruft": np.random.random(20)}
df = DataFrame(data)
ix = df[df['title'] == 'bar'].index
df.loc[ix, ['title']] = 'foobar'
df.loc[ix, ['cruft']] = 0
assert df.loc[1, 'title'] == 'foobar'
assert df.loc[1, 'cruft'] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=lrange(3), columns=lrange(3))
coercable_series = Series([Decimal(1) for _ in range(3)],
index=lrange(3))
uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_clear_caches(self):
# see gh-304
df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]},
index=[0, 1, 2, 3])
df.insert(2, 'z', np.nan)
# cache it
foo = df['z']
df.loc[df.index[2:], 'z'] = 42
expected = Series([np.nan, np.nan, 42, 42], index=df.index, name='z')
assert df['z'] is not foo
tm.assert_series_equal(df['z'], expected)
def test_setitem_None(self):
# GH #766
self.frame[None] = self.frame['A']
assert_series_equal(
self.frame.iloc[:, -1], self.frame['A'], check_names=False)
assert_series_equal(self.frame.loc[:, None], self.frame[
'A'], check_names=False)
assert_series_equal(self.frame[None], self.frame[
'A'], check_names=False)
repr(self.frame)
def test_setitem_empty(self):
# GH 9596
df = pd.DataFrame({'a': ['1', '2', '3'],
'b': ['11', '22', '33'],
'c': ['111', '222', '333']})
result = df.copy()
result.loc[result.b.isna(), 'a'] = result.a
assert_frame_equal(result, df)
@pytest.mark.parametrize("dtype", ["float", "int64"])
@pytest.mark.parametrize("kwargs", [
dict(),
dict(index=[1]),
dict(columns=["A"])
])
def test_setitem_empty_frame_with_boolean(self, dtype, kwargs):
# see gh-10126
kwargs["dtype"] = dtype
df = DataFrame(**kwargs)
df2 = df.copy()
df[df > df2] = 47
assert_frame_equal(df, df2)
def test_setitem_scalars_no_index(self):
# GH16823 / 17894
df = DataFrame()
df['foo'] = 1
expected = DataFrame(columns=['foo']).astype(np.int64)
assert_frame_equal(df, expected)
def test_getitem_empty_frame_with_boolean(self):
# Test for issue #11859
df = pd.DataFrame()
df2 = df[df > 0]
assert_frame_equal(df, df2)
def test_delitem_corner(self):
f = self.frame.copy()
del f['D']
assert len(f.columns) == 3
pytest.raises(KeyError, f.__delitem__, 'D')
del f['B']
assert len(f.columns) == 2
def test_getitem_fancy_2d(self):
f = self.frame
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(f.ix[:, ['B', 'A']],
f.reindex(columns=['B', 'A']))
subidx = self.frame.index[[5, 4, 1]]
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(f.ix[subidx, ['B', 'A']],
f.reindex(index=subidx, columns=['B', 'A']))
# slicing rows, etc.
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(f.ix[5:10], f[5:10])
assert_frame_equal(f.ix[5:10, :], f[5:10])
assert_frame_equal(f.ix[:5, ['A', 'B']],
f.reindex(index=f.index[:5],
columns=['A', 'B']))
# slice rows with labels, inclusive!
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
expected = f.ix[5:11]
result = f.ix[f.index[5]:f.index[10]]
assert_frame_equal(expected, result)
# slice columns
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(f.ix[:, :2], f.reindex(columns=['A', 'B']))
# get view
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
exp = f.copy()
f.ix[5:10].values[:] = 5
exp.values[5:10] = 5
assert_frame_equal(f, exp)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
pytest.raises(ValueError, f.ix.__getitem__, f > 0.5)
def test_slice_floats(self):
index = [52195.504153, 52196.303147, 52198.369883]
df = DataFrame(np.random.rand(3, 2), index=index)
s1 = df.loc[52195.1:52196.5]
assert len(s1) == 2
s1 = df.loc[52195.1:52196.6]
assert len(s1) == 2
s1 = df.loc[52195.1:52198.9]
assert len(s1) == 3
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]]
pytest.raises(KeyError, df2.loc.__getitem__, slice(3, 11))
pytest.raises(KeyError, df2.loc.__setitem__, slice(3, 11), 0)
def test_setitem_fancy_2d(self):
# case 1
frame = self.frame.copy()
expected = frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame.ix[:, ['B', 'A']] = 1
expected['B'] = 1.
expected['A'] = 1.
assert_frame_equal(frame, expected)
# case 2
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = frame.copy()
subidx = self.frame.index[[5, 4, 1]]
values = randn(3, 2)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame.ix[subidx, ['B', 'A']] = values
frame2.ix[[5, 4, 1], ['B', 'A']] = values
expected['B'].ix[subidx] = values[:, 0]
expected['A'].ix[subidx] = values[:, 1]
assert_frame_equal(frame, expected)
assert_frame_equal(frame2, expected)
# case 3: slicing rows, etc.
frame = self.frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
expected1 = self.frame.copy()
frame.ix[5:10] = 1.
expected1.values[5:10] = 1.
assert_frame_equal(frame, expected1)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
expected2 = self.frame.copy()
arr = randn(5, len(frame.columns))
frame.ix[5:10] = arr
expected2.values[5:10] = arr
assert_frame_equal(frame, expected2)
# case 4
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame = self.frame.copy()
frame.ix[5:10, :] = 1.
assert_frame_equal(frame, expected1)
frame.ix[5:10, :] = arr
assert_frame_equal(frame, expected2)
# case 5
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
values = randn(5, 2)
frame.ix[:5, ['A', 'B']] = values
expected['A'][:5] = values[:, 0]
expected['B'][:5] = values[:, 1]
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame2.ix[:5, [0, 1]] = values
assert_frame_equal(frame2, expected)
# case 6: slice rows with labels, inclusive!
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[frame.index[5]:frame.index[10]] = 5.
expected.values[5:11] = 5
assert_frame_equal(frame, expected)
# case 7: slice columns
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
# slice indices
frame.ix[:, 1:3] = 4.
expected.values[:, 1:3] = 4.
assert_frame_equal(frame, expected)
# slice with labels
frame.ix[:, 'B':'C'] = 4.
assert_frame_equal(frame, expected)
# new corner case of boolean slicing / setting
frame = DataFrame(lzip([2, 3, 9, 6, 7], [np.nan] * 5),
columns=['a', 'b'])
lst = [100]
lst.extend([np.nan] * 4)
expected = DataFrame(lzip([100, 3, 9, 6, 7], lst),
columns=['a', 'b'])
frame[frame['a'] == 2] = 100
assert_frame_equal(frame, expected)
def test_fancy_getitem_slice_mixed(self):
sliced = self.mixed_frame.iloc[:, -3:]
assert sliced['D'].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = self.frame.iloc[:, -3:]
def f():
sliced['C'] = 4.
pytest.raises(com.SettingWithCopyError, f)
assert (self.frame['C'] == 4).all()
def test_fancy_setitem_int_labels(self):
# integer index defers to label-based indexing
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
tmp = df.copy()
exp = df.copy()
tmp.ix[[0, 2, 4]] = 5
exp.values[:3] = 5
assert_frame_equal(tmp, exp)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
tmp = df.copy()
exp = df.copy()
tmp.ix[6] = 5
exp.values[3] = 5
assert_frame_equal(tmp, exp)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
tmp = df.copy()
exp = df.copy()
tmp.ix[:, 2] = 5
# tmp correctly sets the column dtype,
# so build exp the same way
exp[2] = 5
assert_frame_equal(tmp, exp)
def test_fancy_getitem_int_labels(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[[4, 2, 0], [2, 0]]
expected = df.reindex(index=[4, 2, 0], columns=[2, 0])
assert_frame_equal(result, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[[4, 2, 0]]
expected = df.reindex(index=[4, 2, 0])
assert_frame_equal(result, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[4]
expected = df.xs(4)
assert_series_equal(result, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[:, 3]
expected = df[3]
assert_series_equal(result, expected)
def test_fancy_index_int_labels_exceptions(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
# labels that aren't contained
pytest.raises(KeyError, df.ix.__setitem__,
([0, 1, 2], [2, 3, 4]), 5)
# try to set indices not contained in frame
pytest.raises(KeyError, self.frame.ix.__setitem__,
['foo', 'bar', 'baz'], 1)
pytest.raises(KeyError, self.frame.ix.__setitem__,
(slice(None, None), ['E']), 1)
# partial setting now allows this GH2578
# pytest.raises(KeyError, self.frame.ix.__setitem__,
# (slice(None, None), 'E'), 1)
def test_setitem_fancy_mixed_2d(self):
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5
result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
assert (result.values == 5).all()
self.mixed_frame.ix[5] = np.nan
assert isna(self.mixed_frame.ix[5]).all()
self.mixed_frame.ix[5] = self.mixed_frame.ix[6]
assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6],
check_names=False)
# #1432
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df = DataFrame({1: [1., 2., 3.],
2: [3, 4, 5]})
assert df._is_mixed_type
df.ix[1] = [5, 10]
expected = DataFrame({1: [1., 5., 3.],
2: [3, 10, 5]})
assert_frame_equal(df, expected)
def test_ix_align(self):
b = Series(randn(10), name=0).sort_values()
df_orig = DataFrame(randn(10, 4))
df = df_orig.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df.ix[:, 0] = b
assert_series_equal(df.ix[:, 0].reindex(b.index), b)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
dft = df_orig.T
dft.ix[0, :] = b
assert_series_equal(dft.ix[0, :].reindex(b.index), b)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df = df_orig.copy()
df.ix[:5, 0] = b
s = df.ix[:5, 0]
assert_series_equal(s, b.reindex(s.index))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
dft = df_orig.T
dft.ix[0, :5] = b
s = dft.ix[0, :5]
assert_series_equal(s, b.reindex(s.index))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df = df_orig.copy()
idx = [0, 1, 3, 5]
df.ix[idx, 0] = b
s = df.ix[idx, 0]
assert_series_equal(s, b.reindex(s.index))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
dft = df_orig.T
dft.ix[0, idx] = b
s = dft.ix[0, idx]
assert_series_equal(s, b.reindex(s.index))
def test_ix_frame_align(self):
b = DataFrame(np.random.randn(3, 4))
df_orig = DataFrame(randn(10, 4))
df = df_orig.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df.ix[:3] = b
out = b.ix[:3]
assert_frame_equal(out, b)
b.sort_index(inplace=True)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df = df_orig.copy()
df.ix[[0, 1, 2]] = b
out = df.ix[[0, 1, 2]].reindex(b.index)
assert_frame_equal(out, b)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df = df_orig.copy()
df.ix[:3] = b
out = df.ix[:3]
assert_frame_equal(out, b.reindex(out.index))
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
assert_frame_equal(rs, xp)
""" #1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index==0, df.columns==1]
xp = df.reindex([0], [1])
assert_frame_equal(rs, xp)
"""
def test_ix_multi_take_nonint_index(self):
df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
columns=['a', 'b'])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
rs = df.ix[[0], [0]]
xp = df.reindex(['x'], columns=['a'])
assert_frame_equal(rs, xp)
def test_ix_multi_take_multiindex(self):
df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
columns=[['a', 'b'], ['1', '2']])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
rs = df.ix[[0], [0]]
xp = df.reindex(['x'], columns=[('a', '1')])
assert_frame_equal(rs, xp)
def test_ix_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
df = DataFrame(np.random.randn(len(idx), 3), idx)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
sub = df.ix[:'d']
assert_frame_equal(sub, df)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
sub = df.ix['a':'c']
assert_frame_equal(sub, df.ix[0:4])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
sub = df.ix['b':'d']
assert_frame_equal(sub, df.ix[2:])
def test_getitem_fancy_1d(self):
f = self.frame
# return self if no slicing...for now
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert f.ix[:, :] is f
# low dimensional slice
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
xs1 = f.ix[2, ['C', 'B', 'A']]
xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])
tm.assert_series_equal(xs1, xs2)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
ts1 = f.ix[5:10, 2]
ts2 = f[f.columns[2]][5:10]
tm.assert_series_equal(ts1, ts2)
# positional xs
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
xs1 = f.ix[0]
xs2 = f.xs(f.index[0])
tm.assert_series_equal(xs1, xs2)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
xs1 = f.ix[f.index[5]]
xs2 = f.xs(f.index[5])
tm.assert_series_equal(xs1, xs2)
# single column
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_series_equal(f.ix[:, 'A'], f['A'])
# return view
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
exp = f.copy()
exp.values[5] = 4
f.ix[5][:] = 4
tm.assert_frame_equal(exp, f)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
exp.values[:, 1] = 6
f.ix[:, 1][:] = 6
tm.assert_frame_equal(exp, f)
# slice of mixed-frame
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
xs = self.mixed_frame.ix[5]
exp = self.mixed_frame.xs(self.mixed_frame.index[5])
tm.assert_series_equal(xs, exp)
def test_setitem_fancy_1d(self):
# case 1: set cross-section for indices
frame = self.frame.copy()
expected = self.frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]
expected['C'][2] = 1.
expected['B'][2] = 2.
expected['A'][2] = 3.
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame2 = self.frame.copy()
frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]
assert_frame_equal(frame, expected)
# case 2, set a section of a column
frame = self.frame.copy()
expected = self.frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
vals = randn(5)
expected.values[5:10, 2] = vals
frame.ix[5:10, 2] = vals
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame2 = self.frame.copy()
frame2.ix[5:10, 'B'] = vals
assert_frame_equal(frame, expected)
# case 3: full xs
frame = self.frame.copy()
expected = self.frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame.ix[4] = 5.
expected.values[4] = 5.
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame.ix[frame.index[4]] = 6.
expected.values[4] = 6.
assert_frame_equal(frame, expected)
# single column
frame = self.frame.copy()
expected = self.frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame.ix[:, 'A'] = 7.
expected['A'] = 7.
assert_frame_equal(frame, expected)
def test_getitem_fancy_scalar(self):
f = self.frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
def test_setitem_fancy_scalar(self):
f = self.frame
expected = self.frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = randn()
expected.values[i, j] = val
ix[idx, col] = val
assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self):
f = self.frame
ix = f.loc
expected = f.reindex(columns=['B', 'D'])
result = ix[:, [False, True, False, True]]
assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=['B', 'D'])
result = ix[f.index[5:10], [False, True, False, True]]
assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
assert_frame_equal(result, expected)
result = ix[boolvec, :]
assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec],
columns=['C', 'D'])
assert_frame_equal(result, expected)
def test_setitem_fancy_boolean(self):
# from 2d, set with booleans
frame = self.frame.copy()
expected = self.frame.copy()
mask = frame['A'] > 0
frame.loc[mask] = 0.
expected.values[mask.values] = 0.
assert_frame_equal(frame, expected)
frame = self.frame.copy()
expected = self.frame.copy()
frame.loc[mask, ['A', 'B']] = 0.
expected.values[mask.values, :2] = 0.
assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self):
result = self.frame.iloc[[1, 4, 7]]
expected = self.frame.loc[self.frame.index[[1, 4, 7]]]
assert_frame_equal(result, expected)
result = self.frame.iloc[:, [2, 0, 1]]
expected = self.frame.loc[:, self.frame.columns[[2, 0, 1]]]
assert_frame_equal(result, expected)
def test_getitem_setitem_fancy_exceptions(self):
ix = self.frame.iloc
with tm.assert_raises_regex(IndexingError, 'Too many indexers'):
ix[:, :, :]
with pytest.raises(IndexingError):
ix[:, :, :] = 1
def test_getitem_setitem_boolean_misaligned(self):
# boolean index misaligned labels
mask = self.frame['A'][::-1] > 1
result = self.frame.loc[mask]
expected = self.frame.loc[mask[::-1]]
assert_frame_equal(result, expected)
cp = self.frame.copy()
expected = self.frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]),
np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
pytest.raises(TypeError, lambda: df.iloc[1.0:5])
result = df.iloc[4:5]
expected = df.reindex([5.0])
assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
def f():
cp.iloc[1.0:5] = 0
pytest.raises(TypeError, f)
def f():
result = cp.iloc[1.0:5] == 0 # noqa
pytest.raises(TypeError, f)
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed(self):
df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['foo', 'bar', 'baz'])
df['str'] = 'qux'
df.loc[df.index[::2], 'str'] = nan
expected = np.array([nan, 'qux', nan, 'qux', nan], dtype=object)
assert_almost_equal(df['str'].values, expected)
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['foo', 'bar', 'baz'])
df['timestamp'] = Timestamp('20010102')
# check our dtypes
result = df.get_dtype_counts()
expected = Series({'float64': 3, 'datetime64[ns]': 1})
assert_series_equal(result, expected)
# set an allowable datetime64 type
df.loc['b', 'timestamp'] = iNaT
assert isna(df.loc['b', 'timestamp'])
# allow this syntax
df.loc['c', 'timestamp'] = nan
assert isna(df.loc['c', 'timestamp'])
# allow this syntax
df.loc['d', :] = nan
assert not isna(df.loc['c', :]).all()
# as of GH 3216 this will now work!
# try to set with a list like item
# pytest.raises(
# Exception, df.loc.__setitem__, ('d', 'timestamp'), [nan])
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame({'a': [0, 0, 0, 0, 13, 14],
'b': [pd.datetime(2012, 1, 1),
1,
'x',
'y',
pd.datetime(2013, 1, 1),
pd.datetime(2014, 1, 1)]})
df = pd.DataFrame(0, columns=list('ab'), index=range(6))
df['b'] = pd.NaT
df.loc[0, 'b'] = pd.datetime(2012, 1, 1)
df.loc[1, 'b'] = 1
df.loc[[2, 3], 'b'] = 'x', 'y'
A = np.array([[13, np.datetime64('2013-01-01T00:00:00')],
[14, np.datetime64('2014-01-01T00:00:00')]])
df.loc[[4, 5], ['a', 'b']] = A
assert_frame_equal(df, expected)
def test_setitem_frame(self):
piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
self.frame.loc[self.frame.index[-2]:, ['A', 'B']] = piece.values
result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
expected = piece.values
assert_almost_equal(result, expected)
# GH 3216
# already aligned
f = self.mixed_frame.copy()
piece = DataFrame([[1., 2.], [3., 4.]],
index=f.index[0:2], columns=['A', 'B'])
key = (slice(None, 2), ['A', 'B'])
f.loc[key] = piece
assert_almost_equal(f.loc[f.index[0:2], ['A', 'B']].values,
piece.values)
# rows unaligned
f = self.mixed_frame.copy()
piece = DataFrame([[1., 2.], [3., 4.], [5., 6.], [7., 8.]],
index=list(f.index[0:2]) + ['foo', 'bar'],
columns=['A', 'B'])
key = (slice(None, 2), ['A', 'B'])
f.loc[key] = piece
assert_almost_equal(f.loc[f.index[0:2:], ['A', 'B']].values,
piece.values[0:2])
# key is unaligned with values
f = self.mixed_frame.copy()
piece = f.loc[f.index[:2], ['A']]
piece.index = f.index[-2:]
key = (slice(-2, None), ['A', 'B'])
f.loc[key] = piece
piece['B'] = np.nan
assert_almost_equal(f.loc[f.index[-2:], ['A', 'B']].values,
piece.values)
# ndarray
f = self.mixed_frame.copy()
piece = self.mixed_frame.loc[f.index[:2], ['A', 'B']]
key = (slice(-2, None), ['A', 'B'])
f.loc[key] = piece.values
assert_almost_equal(f.loc[f.index[-2:], ['A', 'B']].values,
piece.values)
# needs upcasting
df = DataFrame([[1, 2, 'foo'], [3, 4, 'bar']], columns=['A', 'B', 'C'])
df2 = df.copy()
df2.loc[:, ['A', 'B']] = df.loc[:, ['A', 'B']] + 0.5
expected = df.reindex(columns=['A', 'B'])
expected += 0.5
expected['C'] = df['C']
assert_frame_equal(df2, expected)
def test_setitem_frame_align(self):
piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
piece.index = self.frame.index[-2:]
piece.columns = ['A', 'B']
self.frame.loc[self.frame.index[-2:], ['A', 'B']] = piece
result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
expected = piece.values
assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3),
index=['foo', 'foo', 'bar', 'baz', 'bar'])
result = df.loc['foo']
expected = df[:2]
assert_frame_equal(result, expected)
result = df.loc['bar']
expected = df.iloc[[2, 4]]
assert_frame_equal(result, expected)
result = df.loc['baz']
expected = df.iloc[3]
assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3),
index=['foo', 'foo', 'bar', 'baz', 'bar'])
result = df.loc[['bar']]
exp = df.iloc[[2, 4]]
assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
assert_frame_equal(result, exp)
def test_getitem_setitem_ix_bool_keyerror(self):
# #2199
df = DataFrame({'a': [1, 2, 3]})
pytest.raises(KeyError, df.loc.__getitem__, False)
pytest.raises(KeyError, df.loc.__getitem__, True)
pytest.raises(KeyError, df.loc.__setitem__, False, 0)
pytest.raises(KeyError, df.loc.__setitem__, True, 0)
def test_getitem_list_duplicates(self):
# #1943
df = DataFrame(np.random.randn(4, 4), columns=list('AABC'))
df.columns.name = 'foo'
result = df[['B', 'C']]
assert result.columns.name == 'foo'
expected = df.iloc[:, 2:]
assert_frame_equal(result, expected)
def test_get_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.frame.get_value(idx, col)
expected = self.frame[col][idx]
assert result == expected
def test_lookup(self):
def alt(df, rows, cols, dtype):
result = []
for r, c in zip(rows, cols):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result.append(df.get_value(r, c))
return np.array(result, dtype=dtype)
def testit(df):
rows = list(df.index) * len(df.columns)
cols = list(df.columns) * len(df.index)
result = df.lookup(rows, cols)
expected = alt(df, rows, cols, dtype=np.object_)
tm.assert_almost_equal(result, expected, check_dtype=False)
testit(self.mixed_frame)
testit(self.frame)
df = DataFrame({'label': ['a', 'b', 'a', 'c'],
'mask_a': [True, True, False, True],
'mask_b': [True, False, False, False],
'mask_c': [False, True, False, True]})
df['mask'] = df.lookup(df.index, 'mask_' + df['label'])
exp_mask = alt(df, df.index, 'mask_' + df['label'], dtype=np.bool_)
tm.assert_series_equal(df['mask'], pd.Series(exp_mask, name='mask'))
assert df['mask'].dtype == np.bool_
with pytest.raises(KeyError):
self.frame.lookup(['xyz'], ['A'])
with pytest.raises(KeyError):
self.frame.lookup([self.frame.index[0]], ['xyz'])
with tm.assert_raises_regex(ValueError, 'same size'):
self.frame.lookup(['a', 'b', 'c'], ['a'])
def test_set_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
self.frame.set_value(idx, col, 1)
assert self.frame[col][idx] == 1
def test_set_value_resize(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = self.frame.set_value('foobar', 'B', 0)
assert res is self.frame
assert res.index[-1] == 'foobar'
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert res.get_value('foobar', 'B') == 0
self.frame.loc['foobar', 'qux'] = 0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert self.frame.get_value('foobar', 'qux') == 0
res = self.frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res3 = res.set_value('foobar', 'baz', 'sam')
assert res3['baz'].dtype == np.object_
res = self.frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res3 = res.set_value('foobar', 'baz', True)
assert res3['baz'].dtype == np.object_
res = self.frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res3 = res.set_value('foobar', 'baz', 5)
assert is_float_dtype(res3['baz'])
assert isna(res3['baz'].drop(['foobar'])).all()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pytest.raises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')
def test_set_value_with_index_dtype_change(self):
df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC'))
# this is actually ambiguous as the 2 is interpreted as a positional
# so column is not created
df = df_orig.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
df.set_value('C', 2, 1.0)
assert list(df.index) == list(df_orig.index) + ['C']
# assert list(df.columns) == list(df_orig.columns) + [2]
df = df_orig.copy()
df.loc['C', 2] = 1.0
assert list(df.index) == list(df_orig.index) + ['C']
# assert list(df.columns) == list(df_orig.columns) + [2]
# create both new
df = df_orig.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
df.set_value('C', 'D', 1.0)
assert list(df.index) == list(df_orig.index) + ['C']
assert list(df.columns) == list(df_orig.columns) + ['D']
df = df_orig.copy()
df.loc['C', 'D'] = 1.0
assert list(df.index) == list(df_orig.index) + ['C']
assert list(df.columns) == list(df_orig.columns) + ['D']
def test_get_set_value_no_partial_indexing(self):
# partial w/ MultiIndex raise exception
index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])
df = DataFrame(index=index, columns=lrange(4))
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pytest.raises(KeyError, df.get_value, 0, 1)
def test_single_element_ix_dont_upcast(self):
self.frame['E'] = 1
assert issubclass(self.frame['E'].dtype.type, (int, np.integer))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = self.frame.ix[self.frame.index[5], 'E']
assert is_integer(result)
result = self.frame.loc[self.frame.index[5], 'E']
assert is_integer(result)
# GH 11617
df = pd.DataFrame(dict(a=[1.23]))
df["b"] = 666
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[0, "b"]
assert is_integer(result)
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], [0], name='b')
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[[0], "b"]
assert_series_equal(result, expected)
result = df.loc[[0], "b"]
assert_series_equal(result, expected)
def test_iloc_row(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))
result = df.iloc[1]
exp = df.loc[2]
assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
assert_frame_equal(result, expected)
# verify slice is view
# setting it makes it raise/warn
def f():
result[2] = 0.
pytest.raises(com.SettingWithCopyError, f)
exp_col = df[2].copy()
exp_col[4:8] = 0.
assert_series_equal(df[2], exp_col)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
assert_frame_equal(result, expected)
def test_iloc_col(self):
df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))
result = df.iloc[:, 1]
exp = df.loc[:, 2]
assert_series_equal(result, exp)
result = df.iloc[:, 2]
exp = df.loc[:, 4]
assert_series_equal(result, exp)
# slice
result = df.iloc[:, slice(4, 8)]
expected = df.loc[:, 8:14]
assert_frame_equal(result, expected)
# verify slice is view
# and that we are setting a copy
def f():
result[8] = 0.
pytest.raises(com.SettingWithCopyError, f)
assert (df[8] == 0).all()
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
assert_frame_equal(result, expected)
def test_iloc_duplicates(self):
df = DataFrame(np.random.rand(3, 3), columns=list('ABC'),
index=list('aab'))
result = df.iloc[0]
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result2 = df.ix[0]
assert isinstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.T.iloc[:, 0]
result2 = df.T.ix[:, 0]
assert isinstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
# multiindex
df = DataFrame(np.random.randn(3, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
rs = df.iloc[0]
xp = df.ix[0]
assert_series_equal(rs, xp)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
rs = df.iloc[:, 0]
xp = df.T.ix[0]
assert_series_equal(rs, xp)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
rs = df.iloc[:, [0]]
xp = df.ix[:, [0]]
assert_frame_equal(rs, xp)
# #2259
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])
result = df.iloc[:, [0]]
expected = df.take([0], axis=1)
assert_frame_equal(result, expected)
def test_loc_duplicates(self):
# gh-17105
# insert a duplicate element to the index
trange = pd.date_range(start=pd.Timestamp(year=2017, month=1, day=1),
end=pd.Timestamp(year=2017, month=1, day=5))
trange = trange.insert(loc=5,
item=pd.Timestamp(year=2017, month=1, day=5))
df = pd.DataFrame(0, index=trange, columns=["A", "B"])
bool_idx = np.array([False, False, False, False, False, True])
# assignment
df.loc[trange[bool_idx], "A"] = 6
expected = pd.DataFrame({'A': [0, 0, 0, 0, 6, 6],
'B': [0, 0, 0, 0, 0, 0]},
index=trange)
tm.assert_frame_equal(df, expected)
# in-place
df = pd.DataFrame(0, index=trange, columns=["A", "B"])
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
def test_iloc_sparse_propagate_fill_value(self):
from pandas.core.sparse.api import SparseDataFrame
df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
assert len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values)
def test_iat(self):
for i, row in enumerate(self.frame.index):
for j, col in enumerate(self.frame.columns):
result = self.frame.iat[i, j]
expected = self.frame.at[row, col]
assert result == expected
def test_nested_exception(self):
# Ignore the strange way of triggering the problem
# (which may get fixed); it's just a way to trigger
# the issue of reraising an outer exception without
# a named argument
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
index = list(df.index)
index[0] = ["a", "b"]
df.index = index
try:
repr(df)
except Exception as e:
assert type(e) != UnboundLocalError
@pytest.mark.parametrize("method,expected_values", [
("nearest", [0, 1, 1, 2]),
("pad", [np.nan, 0, 1, 1]),
("backfill", [0, 1, 2, 2])
])
def test_reindex_methods(self, method, expected_values):
df = pd.DataFrame({"x": list(range(5))})
target = np.array([-0.1, 0.9, 1.1, 1.5])
expected = pd.DataFrame({'x': expected_values}, index=target)
actual = df.reindex(target, method=method)
assert_frame_equal(expected, actual)
actual = df.reindex_like(df, method=method, tolerance=0)
assert_frame_equal(df, actual)
actual = df.reindex_like(df, method=method, tolerance=[0, 0, 0, 0])
assert_frame_equal(df, actual)
actual = df.reindex(target, method=method, tolerance=1)
assert_frame_equal(expected, actual)
actual = df.reindex(target, method=method, tolerance=[1, 1, 1, 1])
assert_frame_equal(expected, actual)
e2 = expected[::-1]
actual = df.reindex(target[::-1], method=method)
assert_frame_equal(e2, actual)
new_order = [3, 0, 2, 1]
e2 = expected.iloc[new_order]
actual = df.reindex(target[new_order], method=method)
assert_frame_equal(e2, actual)
switched_method = ('pad' if method == 'backfill'
else 'backfill' if method == 'pad'
else method)
actual = df[::-1].reindex(target, method=switched_method)
assert_frame_equal(expected, actual)
def test_reindex_methods_nearest_special(self):
df = pd.DataFrame({"x": list(range(5))})
target = np.array([-0.1, 0.9, 1.1, 1.5])
expected = pd.DataFrame({"x": [0, 1, 1, np.nan]}, index=target)
actual = df.reindex(target, method="nearest", tolerance=0.2)
assert_frame_equal(expected, actual)
expected = pd.DataFrame({"x": [0, np.nan, 1, np.nan]}, index=target)
actual = df.reindex(target, method="nearest",
tolerance=[0.5, 0.01, 0.4, 0.1])
assert_frame_equal(expected, actual)
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
assert np.issubdtype(result['B'].dtype, np.dtype('M8[ns]'))
mask = com.isna(result)['B']
assert mask[-5:].all()
assert not mask[:-5].any()
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
assert x[0].dtype == np.dtype('M8[ns]')
def test_non_monotonic_reindex_methods(self):
dr = pd.date_range('2013-08-01', periods=6, freq='B')
data = np.random.randn(6, 1)
df = pd.DataFrame(data, index=dr, columns=list('A'))
df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]],
columns=list('A'))
# index is not monotonic increasing or decreasing
pytest.raises(ValueError, df_rev.reindex, df.index, method='pad')
pytest.raises(ValueError, df_rev.reindex, df.index, method='ffill')
pytest.raises(ValueError, df_rev.reindex, df.index, method='bfill')
pytest.raises(ValueError, df_rev.reindex, df.index, method='nearest')
def test_reindex_level(self):
from itertools import permutations
icol = ['jim', 'joe', 'jolie']
def verify_first_level(df, level, idx, check_index_type=True):
def f(val):
return np.nonzero(df[level] == val)[0]
i = np.concatenate(list(map(f, idx)))
left = df.set_index(icol).reindex(idx, level=level)
right = df.iloc[i].set_index(icol)
assert_frame_equal(left, right, check_index_type=check_index_type)
def verify(df, level, idx, indexer, check_index_type=True):
left = df.set_index(icol).reindex(idx, level=level)
right = df.iloc[indexer].set_index(icol)
assert_frame_equal(left, right, check_index_type=check_index_type)
df = pd.DataFrame({'jim': list('B' * 4 + 'A' * 2 + 'C' * 3),
'joe': list('abcdeabcd')[::-1],
'jolie': [10, 20, 30] * 3,
'joline': np.random.randint(0, 1000, 9)})
target = [['C', 'B', 'A'], ['F', 'C', 'A', 'D'], ['A'],
['A', 'B', 'C'], ['C', 'A', 'B'], ['C', 'B'], ['C', 'A'],
['A', 'B'], ['B', 'A', 'C']]
for idx in target:
verify_first_level(df, 'jim', idx)
# reindexing by these causes different MultiIndex levels
for idx in [['D', 'F'], ['A', 'C', 'B']]:
verify_first_level(df, 'jim', idx, check_index_type=False)
verify(df, 'joe', list('abcde'), [3, 2, 1, 0, 5, 4, 8, 7, 6])
verify(df, 'joe', list('abcd'), [3, 2, 1, 0, 5, 8, 7, 6])
verify(df, 'joe', list('abc'), [3, 2, 1, 8, 7, 6])
verify(df, 'joe', list('eca'), [1, 3, 4, 6, 8])
verify(df, 'joe', list('edc'), [0, 1, 4, 5, 6])
verify(df, 'joe', list('eadbc'), [3, 0, 2, 1, 4, 5, 8, 7, 6])
verify(df, 'joe', list('edwq'), [0, 4, 5])
verify(df, 'joe', list('wq'), [], check_index_type=False)
df = DataFrame({'jim': ['mid'] * 5 + ['btm'] * 8 + ['top'] * 7,
'joe': ['3rd'] * 2 + ['1st'] * 3 + ['2nd'] * 3 +
['1st'] * 2 + ['3rd'] * 3 + ['1st'] * 2 +
['3rd'] * 3 + ['2nd'] * 2,
# this needs to be jointly unique with jim and joe, or reindexing
# will fail ~1.5% of the time; this works out to needing unique
# groups of the same size as joe
'jolie': np.concatenate([
np.random.choice(1000, x, replace=False)
for x in [2, 3, 3, 2, 3, 2, 3, 2]]),
'joline': np.random.randn(20).round(3) * 10})
for idx in permutations(df['jim'].unique()):
for i in range(3):
verify_first_level(df, 'jim', idx[:i + 1])
i = [2, 3, 4, 0, 1, 8, 9, 5, 6, 7, 10,
11, 12, 13, 14, 18, 19, 15, 16, 17]
verify(df, 'joe', ['1st', '2nd', '3rd'], i)
i = [0, 1, 2, 3, 4, 10, 11, 12, 5, 6,
7, 8, 9, 15, 16, 17, 18, 19, 13, 14]
verify(df, 'joe', ['3rd', '2nd', '1st'], i)
i = [0, 1, 5, 6, 7, 10, 11, 12, 18, 19, 15, 16, 17]
verify(df, 'joe', ['2nd', '3rd'], i)
i = [0, 1, 2, 3, 4, 10, 11, 12, 8, 9, 15, 16, 17, 13, 14]
verify(df, 'joe', ['3rd', '1st'], i)
def test_getitem_ix_float_duplicates(self):
df = pd.DataFrame(np.random.randn(3, 3),
index=[0.1, 0.2, 0.2], columns=list('abc'))
expect = df.iloc[1:]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:, 0]
assert_series_equal(df.loc[0.2, 'a'], expect)
df.index = [1, 0.2, 0.2]
expect = df.iloc[1:]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:, 0]
assert_series_equal(df.loc[0.2, 'a'], expect)
df = pd.DataFrame(np.random.randn(4, 3),
index=[1, 0.2, 0.2, 1], columns=list('abc'))
expect = df.iloc[1:-1]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:-1, 0]
assert_series_equal(df.loc[0.2, 'a'], expect)
df.index = [0.1, 0.2, 2, 0.2]
expect = df.iloc[[1, -1]]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[[1, -1], 0]
assert_series_equal(df.loc[0.2, 'a'], expect)
def test_setitem_with_sparse_value(self):
# GH8131
df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
sp_series = pd.Series([0, 0, 1]).to_sparse(fill_value=0)
df['new_column'] = sp_series
assert_series_equal(df['new_column'], sp_series, check_names=False)
def test_setitem_with_unaligned_sparse_value(self):
df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
sp_series = (pd.Series([0, 0, 1], index=[2, 1, 0])
.to_sparse(fill_value=0))
df['new_column'] = sp_series
exp = pd.SparseSeries([1, 0, 0], name='new_column')
assert_series_equal(df['new_column'], exp)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
column = pd.Series(pd.date_range('2015-01-01', periods=3, tz='utc'),
name='dates')
df = pd.DataFrame({'dates': column})
df['dates'] = column[[1, 0, 2]]
assert_series_equal(df['dates'], column)
df = pd.DataFrame({'dates': column})
df.loc[[0, 1, 2], 'dates'] = column[[1, 0, 2]]
assert_series_equal(df['dates'], column)
def test_setitem_datetime_coercion(self):
# gh-1048
df = pd.DataFrame({'c': [pd.Timestamp('2010-10-01')] * 3})
df.loc[0:1, 'c'] = np.datetime64('2008-08-08')
assert pd.Timestamp('2008-08-08') == df.loc[0, 'c']
assert pd.Timestamp('2008-08-08') == df.loc[1, 'c']
df.loc[2, 'c'] = date(2005, 5, 5)
assert pd.Timestamp('2005-05-05') == df.loc[2, 'c']
def test_setitem_datetimelike_with_inference(self):
# GH 7592
# assignment of timedeltas with NaT
one_hour = timedelta(hours=1)
df = DataFrame(index=date_range('20130101', periods=4))
df['A'] = np.array([1 * one_hour] * 4, dtype='m8[ns]')
df.loc[:, 'B'] = np.array([2 * one_hour] * 4, dtype='m8[ns]')
df.loc[:3, 'C'] = np.array([3 * one_hour] * 3, dtype='m8[ns]')
df.loc[:, 'D'] = np.array([4 * one_hour] * 4, dtype='m8[ns]')
df.loc[df.index[:3], 'E'] = np.array([5 * one_hour] * 3,
dtype='m8[ns]')
df['F'] = np.timedelta64('NaT')
df.loc[df.index[:-1], 'F'] = np.array([6 * one_hour] * 3,
dtype='m8[ns]')
df.loc[df.index[-3]:, 'G'] = date_range('20130101', periods=3)
df['H'] = np.datetime64('NaT')
result = df.dtypes
expected = Series([np.dtype('timedelta64[ns]')] * 6 +
[np.dtype('datetime64[ns]')] * 2,
index=list('ABCDEFGH'))
assert_series_equal(result, expected)
@pytest.mark.parametrize('idxer', ['var', ['var']])
def test_setitem_datetimeindex_tz(self, idxer, tz_naive_fixture):
# GH 11365
tz = tz_naive_fixture
idx = date_range(start='2015-07-12', periods=3, freq='H', tz=tz)
expected = DataFrame(1.2, index=idx, columns=['var'])
result = DataFrame(index=idx, columns=['var'])
result.loc[:, idxer] = expected
tm.assert_frame_equal(result, expected)
def test_at_time_between_time_datetimeindex(self):
index = date_range("2012-01-01", "2012-01-05", freq='30min')
df = DataFrame(randn(len(index), 5), index=index)
akey = time(12, 0, 0)
bkey = slice(time(13, 0, 0), time(14, 0, 0))
ainds = [24, 72, 120, 168]
binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]
result = df.at_time(akey)
expected = df.loc[akey]
expected2 = df.iloc[ainds]
assert_frame_equal(result, expected)
assert_frame_equal(result, expected2)
assert len(result) == 4
result = df.between_time(bkey.start, bkey.stop)
expected = df.loc[bkey]
expected2 = df.iloc[binds]
assert_frame_equal(result, expected)
assert_frame_equal(result, expected2)
assert len(result) == 12
result = df.copy()
result.loc[akey] = 0
result = result.loc[akey]
expected = df.loc[akey].copy()
expected.loc[:] = 0
assert_frame_equal(result, expected)
result = df.copy()
result.loc[akey] = 0
result.loc[akey] = df.iloc[ainds]
assert_frame_equal(result, df)
result = df.copy()
result.loc[bkey] = 0
result = result.loc[bkey]
expected = df.loc[bkey].copy()
expected.loc[:] = 0
assert_frame_equal(result, expected)
result = df.copy()
result.loc[bkey] = 0
result.loc[bkey] = df.iloc[binds]
assert_frame_equal(result, df)
def test_xs(self):
idx = self.frame.index[5]
xs = self.frame.xs(idx)
for item, value in compat.iteritems(xs):
if np.isnan(value):
assert np.isnan(self.frame[item][idx])
else:
assert value == self.frame[item][idx]
# mixed-type xs
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data)
xs = frame.xs('1')
assert xs.dtype == np.object_
assert xs['A'] == 1
assert xs['B'] == '1'
with pytest.raises(KeyError):
self.tsframe.xs(self.tsframe.index[0] - BDay())
# xs get column
series = self.frame.xs('A', axis=1)
expected = self.frame['A']
assert_series_equal(series, expected)
# view is returned if possible
series = self.frame.xs('A', axis=1)
series[:] = 5
assert (expected == 5).all()
def test_xs_corner(self):
# pathological mixed-type reordering case
df = DataFrame(index=[0])
df['A'] = 1.
df['B'] = 'foo'
df['C'] = 2.
df['D'] = 'bar'
df['E'] = 3.
xs = df.xs(0)
exp = pd.Series([1., 'foo', 2., 'bar', 3.],
index=list('ABCDE'), name=0)
tm.assert_series_equal(xs, exp)
# no columns but Index(dtype=object)
df = DataFrame(index=['a', 'b', 'c'])
result = df.xs('a')
expected = Series([], name='a', index=pd.Index([], dtype=object))
assert_series_equal(result, expected)
def test_xs_duplicates(self):
df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])
cross = df.xs('c')
exp = df.iloc[2]
assert_series_equal(cross, exp)
def test_xs_keep_level(self):
df = (DataFrame({'day': {0: 'sat', 1: 'sun'},
'flavour': {0: 'strawberry', 1: 'strawberry'},
'sales': {0: 10, 1: 12},
'year': {0: 2008, 1: 2008}})
.set_index(['year', 'flavour', 'day']))
result = df.xs('sat', level='day', drop_level=False)
expected = df[:1]
assert_frame_equal(result, expected)
result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False)
assert_frame_equal(result, expected)
def test_xs_view(self):
# in 0.14 this will return a view if possible, a copy otherwise,
# but this is numpy dependent
dm = DataFrame(np.arange(20.).reshape(4, 5),
index=lrange(4), columns=lrange(5))
dm.xs(2)[:] = 10
assert (dm.xs(2) == 10).all()
def test_index_namedtuple(self):
from collections import namedtuple
IndexType = namedtuple("IndexType", ["a", "b"])
idx1 = IndexType("foo", "bar")
idx2 = IndexType("baz", "bof")
index = Index([idx1, idx2],
name="composite_index", tupleize_cols=False)
df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[IndexType("foo", "bar")]["A"]
assert result == 1
result = df.loc[IndexType("foo", "bar")]["A"]
assert result == 1
def test_boolean_indexing(self):
idx = lrange(3)
cols = ['A', 'B', 'C']
df1 = DataFrame(index=idx, columns=cols,
data=np.array([[0.0, 0.5, 1.0],
[1.5, 2.0, 2.5],
[3.0, 3.5, 4.0]],
dtype=float))
df2 = DataFrame(index=idx, columns=cols,
data=np.ones((len(idx), len(cols))))
expected = DataFrame(index=idx, columns=cols,
data=np.array([[0.0, 0.5, 1.0],
[1.5, 2.0, -1],
[-1, -1, -1]], dtype=float))
df1[df1 > 2.0 * df2] = -1
assert_frame_equal(df1, expected)
with tm.assert_raises_regex(ValueError, 'Item wrong length'):
df1[df1.index[:-1] > 2] = -1
def test_boolean_indexing_mixed(self):
df = DataFrame({
long(0): {35: np.nan, 40: np.nan, 43: np.nan,
49: np.nan, 50: np.nan},
long(1): {35: np.nan,
40: 0.32632316859446198,
43: np.nan,
49: 0.32632316859446198,
50: 0.39114724480578139},
long(2): {35: np.nan, 40: np.nan, 43: 0.29012581014105987,
49: np.nan, 50: np.nan},
long(3): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan,
50: np.nan},
long(4): {35: 0.34215328467153283, 40: np.nan, 43: np.nan,
49: np.nan, 50: np.nan},
'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})
# mixed int/float ok
df2 = df.copy()
df2[df2 > 0.3] = 1
expected = df.copy()
expected.loc[40, 1] = 1
expected.loc[49, 1] = 1
expected.loc[50, 1] = 1
expected.loc[35, 4] = 1
assert_frame_equal(df2, expected)
df['foo'] = 'test'
msg = ("boolean setting on mixed-type|"
"not supported between|"
"unorderable types")
with tm.assert_raises_regex(TypeError, msg):
# TODO: This message should be the same in PY2/PY3
df[df > 0.3] = 1
def test_where(self):
default_frame = DataFrame(np.random.randn(5, 3),
columns=['A', 'B', 'C'])
def _safe_add(df):
# only add to the numeric items
def is_ok(s):
return (issubclass(s.dtype.type, (np.integer, np.floating)) and
s.dtype != 'uint8')
return DataFrame(dict((c, s + 1) if is_ok(s) else (c, s)
for c, s in compat.iteritems(df)))
def _check_get(df, cond, check_dtypes=True):
other1 = _safe_add(df)
rs = df.where(cond, other1)
rs2 = df.where(cond.values, other1)
for k, v in rs.iteritems():
exp = Series(
np.where(cond[k], df[k], other1[k]), index=v.index)
assert_series_equal(v, exp, check_names=False)
assert_frame_equal(rs, rs2)
# dtypes
if check_dtypes:
assert (rs.dtypes == df.dtypes).all()
# check getting
for df in [default_frame, self.mixed_frame,
self.mixed_float, self.mixed_int]:
if compat.PY3 and df is self.mixed_frame:
with pytest.raises(TypeError):
df > 0
continue
cond = df > 0
_check_get(df, cond)
# upcasting case (GH # 2794)
df = DataFrame({c: Series([1] * 3, dtype=c)
for c in ['float32', 'float64',
'int32', 'int64']})
df.iloc[1, :] = 0
result = df.where(df >= 0).get_dtype_counts()
# when we don't preserve boolean casts
#
# expected = Series({ 'float32' : 1, 'float64' : 3 })
expected = Series({'float32': 1, 'float64': 1, 'int32': 1, 'int64': 1})
assert_series_equal(result, expected)
# aligning
def _check_align(df, cond, other, check_dtypes=True):
rs = df.where(cond, other)
for i, k in enumerate(rs.columns):
result = rs[k]
d = df[k].values
c = cond[k].reindex(df[k].index).fillna(False).values
if is_scalar(other):
o = other
else:
if isinstance(other, np.ndarray):
o = Series(other[:, i], index=result.index).values
else:
o = other[k].values
new_values = d if c.all() else np.where(c, d, o)
expected = Series(new_values, index=result.index, name=k)
# since we can't always have the correct numpy dtype
# as numpy doesn't know how to downcast, don't check
assert_series_equal(result, expected, check_dtype=False)
# dtypes
# can't check dtype when other is an ndarray
if check_dtypes and not isinstance(other, np.ndarray):
assert (rs.dtypes == df.dtypes).all()
for df in [self.mixed_frame, self.mixed_float, self.mixed_int]:
if compat.PY3 and df is self.mixed_frame:
with pytest.raises(TypeError):
df > 0
continue
# other is a frame
cond = (df > 0)[1:]
_check_align(df, cond, _safe_add(df))
# check other is ndarray
cond = df > 0
_check_align(df, cond, (_safe_add(df).values))
# integers are upcast, so don't check the dtypes
cond = df > 0
check_dtypes = all(not issubclass(s.type, np.integer)
for s in df.dtypes)
_check_align(df, cond, np.nan, check_dtypes=check_dtypes)
# invalid conditions
df = default_frame
err1 = (df + 1).values[0:2, :]
pytest.raises(ValueError, df.where, cond, err1)
err2 = cond.iloc[:2, :].values
other1 = _safe_add(df)
pytest.raises(ValueError, df.where, err2, other1)
pytest.raises(ValueError, df.mask, True)
pytest.raises(ValueError, df.mask, 0)
# where inplace
def _check_set(df, cond, check_dtypes=True):
dfi = df.copy()
econd = cond.reindex_like(df).fillna(True)
expected = dfi.mask(~econd)
dfi.where(cond, np.nan, inplace=True)
assert_frame_equal(dfi, expected)
# dtypes (and confirm upcasts)
if check_dtypes:
for k, v in compat.iteritems(df.dtypes):
if issubclass(v.type, np.integer) and not cond[k].all():
v = np.dtype('float64')
assert dfi[k].dtype == v
for df in [default_frame, self.mixed_frame, self.mixed_float,
self.mixed_int]:
if compat.PY3 and df is self.mixed_frame:
with pytest.raises(TypeError):
df > 0
continue
cond = df > 0
_check_set(df, cond)
cond = df >= 0
_check_set(df, cond)
# aligning
cond = (df >= 0)[1:]
_check_set(df, cond)
# GH 10218
# test DataFrame.where with Series slicing
df = DataFrame({'a': range(3), 'b': range(4, 7)})
result = df.where(df['a'] == 1)
expected = df[df['a'] == 1].reindex(df.index)
assert_frame_equal(result, expected)
@pytest.mark.parametrize("klass", [list, tuple, np.array])
def test_where_array_like(self, klass):
# see gh-15414
df = DataFrame({"a": [1, 2, 3]})
cond = [[False], [True], [True]]
expected = DataFrame({"a": [np.nan, 2, 3]})
result = df.where(klass(cond))
assert_frame_equal(result, expected)
df["b"] = 2
expected["b"] = [2, np.nan, 2]
cond = [[False, True], [True, False], [True, True]]
result = df.where(klass(cond))
assert_frame_equal(result, expected)
@pytest.mark.parametrize("cond", [
[[1], [0], [1]],
Series([[2], [5], [7]]),
DataFrame({"a": [2, 5, 7]}),
[["True"], ["False"], ["True"]],
[[Timestamp("2017-01-01")],
[pd.NaT], [Timestamp("2017-01-02")]]
])
def test_where_invalid_input_single(self, cond):
# see gh-15414: only boolean arrays accepted
df = DataFrame({"a": [1, 2, 3]})
msg = "Boolean array expected for the condition"
with tm.assert_raises_regex(ValueError, msg):
df.where(cond)
@pytest.mark.parametrize("cond", [
[[0, 1], [1, 0], [1, 1]],
Series([[0, 2], [5, 0], [4, 7]]),
[["False", "True"], ["True", "False"],
["True", "True"]],
DataFrame({"a": [2, 5, 7], "b": [4, 8, 9]}),
[[pd.NaT, Timestamp("2017-01-01")],
[Timestamp("2017-01-02"), pd.NaT],
[Timestamp("2017-01-03"), Timestamp("2017-01-03")]]
])
def test_where_invalid_input_multiple(self, cond):
# see gh-15414: only boolean arrays accepted
df = DataFrame({"a": [1, 2, 3], "b": [2, 2, 2]})
msg = "Boolean array expected for the condition"
with tm.assert_raises_regex(ValueError, msg):
df.where(cond)
def test_where_dataframe_col_match(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = DataFrame([[True, False, True], [False, False, True]])
result = df.where(cond)
expected = DataFrame([[1.0, np.nan, 3], [np.nan, np.nan, 6]])
tm.assert_frame_equal(result, expected)
# this *does* align, though has no matching columns
cond.columns = ["a", "b", "c"]
result = df.where(cond)
expected = DataFrame(np.nan, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_where_ndframe_align(self):
msg = "Array conditional must be same shape as self"
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = [True]
with tm.assert_raises_regex(ValueError, msg):
df.where(cond)
expected = DataFrame([[1, 2, 3], [np.nan, np.nan, np.nan]])
out = df.where(Series(cond))
tm.assert_frame_equal(out, expected)
cond = np.array([False, True, False, True])
with tm.assert_raises_regex(ValueError, msg):
df.where(cond)
expected = DataFrame([[np.nan, np.nan, np.nan], [4, 5, 6]])
out = df.where(Series(cond))
tm.assert_frame_equal(out, expected)
def test_where_bug(self):
# see gh-2793
df = DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [
4.0, 3.0, 2.0, 1.0]}, dtype='float64')
expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [
4.0, 3.0, np.nan, np.nan]}, dtype='float64')
result = df.where(df > 2, np.nan)
assert_frame_equal(result, expected)
result = df.copy()
result.where(result > 2, np.nan, inplace=True)
assert_frame_equal(result, expected)
def test_where_bug_mixed(self, sint_dtype):
# see gh-2793
df = DataFrame({"a": np.array([1, 2, 3, 4], dtype=sint_dtype),
"b": np.array([4.0, 3.0, 2.0, 1.0],
dtype="float64")})
expected = DataFrame({"a": [np.nan, np.nan, 3.0, 4.0],
"b": [4.0, 3.0, np.nan, np.nan]},
dtype="float64")
result = df.where(df > 2, np.nan)
assert_frame_equal(result, expected)
result = df.copy()
result.where(result > 2, np.nan, inplace=True)
assert_frame_equal(result, expected)
def test_where_bug_transposition(self):
# see gh-7506
a = DataFrame({0: [1, 2], 1: [3, 4], 2: [5, 6]})
b = DataFrame({0: [np.nan, 8], 1: [9, np.nan], 2: [np.nan, np.nan]})
do_not_replace = b.isna() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace, b)
assert_frame_equal(result, expected)
a = DataFrame({0: [4, 6], 1: [1, 0]})
b = DataFrame({0: [np.nan, 3], 1: [3, np.nan]})
do_not_replace = b.isna() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace, b)
assert_frame_equal(result, expected)
def test_where_datetime(self):
# GH 3311
df = DataFrame(dict(A=date_range('20130102', periods=5),
B=date_range('20130104', periods=5),
C=np.random.randn(5)))
stamp = datetime(2013, 1, 3)
with pytest.raises(TypeError):
df > stamp
result = df[df.iloc[:, :-1] > stamp]
expected = df.copy()
expected.loc[[0, 1], 'A'] = np.nan
expected.loc[:, 'C'] = np.nan
assert_frame_equal(result, expected)
def test_where_none(self):
# GH 4667
# setting with None changes dtype
df = DataFrame({'series': Series(range(10))}).astype(float)
df[df > 7] = None
expected = DataFrame(
{'series': Series([0, 1, 2, 3, 4, 5, 6, 7, np.nan, np.nan])})
assert_frame_equal(df, expected)
# GH 7656
df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {
'A': np.nan, 'B': 'Test', 'C': np.nan}])
expected = df.where(~isna(df), None)
with tm.assert_raises_regex(TypeError, 'boolean setting '
'on mixed-type'):
df.where(~isna(df), None, inplace=True)
def test_where_align(self):
def create():
df = DataFrame(np.random.randn(10, 3))
df.iloc[3:5, 0] = np.nan
df.iloc[4:6, 1] = np.nan
df.iloc[5:8, 2] = np.nan
return df
# series
df = create()
expected = df.fillna(df.mean())
result = df.where(pd.notna(df), df.mean(), axis='columns')
assert_frame_equal(result, expected)
df.where(pd.notna(df), df.mean(), inplace=True, axis='columns')
assert_frame_equal(df, expected)
df = create().fillna(0)
expected = df.apply(lambda x, y: x.where(x > 0, y), y=df[0])
result = df.where(df > 0, df[0], axis='index')
assert_frame_equal(result, expected)
result = df.where(df > 0, df[0], axis='rows')
assert_frame_equal(result, expected)
# frame
df = create()
expected = df.fillna(1)
result = df.where(pd.notna(df), DataFrame(
1, index=df.index, columns=df.columns))
assert_frame_equal(result, expected)
def test_where_complex(self):
# GH 6345
expected = DataFrame(
[[1 + 1j, 2], [np.nan, 4 + 1j]], columns=['a', 'b'])
df = DataFrame([[1 + 1j, 2], [5 + 1j, 4 + 1j]], columns=['a', 'b'])
df[df.abs() >= 5] = np.nan
assert_frame_equal(df, expected)
def test_where_axis(self):
# GH 9736
df = DataFrame(np.random.randn(2, 2))
mask = DataFrame([[False, False], [False, False]])
s = Series([0, 1])
expected = DataFrame([[0, 0], [1, 1]], dtype='float64')
result = df.where(mask, s, axis='index')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s, axis='index', inplace=True)
assert_frame_equal(result, expected)
expected = DataFrame([[0, 1], [0, 1]], dtype='float64')
result = df.where(mask, s, axis='columns')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s, axis='columns', inplace=True)
assert_frame_equal(result, expected)
# Upcast needed
df = DataFrame([[1, 2], [3, 4]], dtype='int64')
mask = DataFrame([[False, False], [False, False]])
s = Series([0, np.nan])
expected = DataFrame([[0, 0], [np.nan, np.nan]], dtype='float64')
result = df.where(mask, s, axis='index')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s, axis='index', inplace=True)
assert_frame_equal(result, expected)
expected = DataFrame([[0, np.nan], [0, np.nan]])
result = df.where(mask, s, axis='columns')
assert_frame_equal(result, expected)
expected = DataFrame({0: np.array([0, 0], dtype='int64'),
1: np.array([np.nan, np.nan], dtype='float64')})
result = df.copy()
result.where(mask, s, axis='columns', inplace=True)
assert_frame_equal(result, expected)
# Multiple dtypes (=> multiple Blocks)
df = pd.concat([
DataFrame(np.random.randn(10, 2)),
DataFrame(np.random.randint(0, 10, size=(10, 2)), dtype='int64')],
ignore_index=True, axis=1)
mask = DataFrame(False, columns=df.columns, index=df.index)
s1 = Series(1, index=df.columns)
s2 = Series(2, index=df.index)
result = df.where(mask, s1, axis='columns')
expected = DataFrame(1.0, columns=df.columns, index=df.index)
expected[2] = expected[2].astype('int64')
expected[3] = expected[3].astype('int64')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s1, axis='columns', inplace=True)
assert_frame_equal(result, expected)
result = df.where(mask, s2, axis='index')
expected = DataFrame(2.0, columns=df.columns, index=df.index)
expected[2] = expected[2].astype('int64')
expected[3] = expected[3].astype('int64')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s2, axis='index', inplace=True)
assert_frame_equal(result, expected)
# DataFrame vs DataFrame
d1 = df.copy().drop(1, axis=0)
expected = df.copy()
expected.loc[1, :] = np.nan
result = df.where(mask, d1)
assert_frame_equal(result, expected)
result = df.where(mask, d1, axis='index')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, d1, inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, d1, inplace=True, axis='index')
assert_frame_equal(result, expected)
d2 = df.copy().drop(1, axis=1)
expected = df.copy()
expected.loc[:, 1] = np.nan
result = df.where(mask, d2)
assert_frame_equal(result, expected)
result = df.where(mask, d2, axis='columns')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, d2, inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, d2, inplace=True, axis='columns')
assert_frame_equal(result, expected)
def test_where_callable(self):
# GH 12533
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.where(lambda x: x > 4, lambda x: x + 1)
exp = DataFrame([[2, 3, 4], [5, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df > 4, df + 1))
# return ndarray and scalar
result = df.where(lambda x: (x % 2 == 0).values, lambda x: 99)
exp = DataFrame([[99, 2, 99], [4, 99, 6], [99, 8, 99]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df % 2 == 0, 99))
# chain
result = (df + 2).where(lambda x: x > 8, lambda x: x + 10)
exp = DataFrame([[13, 14, 15], [16, 17, 18], [9, 10, 11]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result,
(df + 2).where((df + 2) > 8, (df + 2) + 10))
def test_where_tz_values(self, tz_naive_fixture):
df1 = DataFrame(DatetimeIndex(['20150101', '20150102', '20150103'],
tz=tz_naive_fixture),
columns=['date'])
df2 = DataFrame(DatetimeIndex(['20150103', '20150104', '20150105'],
tz=tz_naive_fixture),
columns=['date'])
mask = DataFrame([True, True, False], columns=['date'])
exp = DataFrame(DatetimeIndex(['20150101', '20150102', '20150105'],
tz=tz_naive_fixture),
columns=['date'])
result = df1.where(mask, df2)
assert_frame_equal(exp, result)
def test_mask(self):
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rs = df.where(cond, np.nan)
assert_frame_equal(rs, df.mask(df <= 0))
assert_frame_equal(rs, df.mask(~cond))
other = DataFrame(np.random.randn(5, 3))
rs = df.where(cond, other)
assert_frame_equal(rs, df.mask(df <= 0, other))
assert_frame_equal(rs, df.mask(~cond, other))
# see gh-21891
df = DataFrame([1, 2])
res = df.mask([[True], [False]])
exp = DataFrame([np.nan, 2])
tm.assert_frame_equal(res, exp)
def test_mask_inplace(self):
# GH8801
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rdf = df.copy()
rdf.where(cond, inplace=True)
assert_frame_equal(rdf, df.where(cond))
assert_frame_equal(rdf, df.mask(~cond))
rdf = df.copy()
rdf.where(cond, -df, inplace=True)
assert_frame_equal(rdf, df.where(cond, -df))
assert_frame_equal(rdf, df.mask(~cond, -df))
def test_mask_edge_case_1xN_frame(self):
# GH4071
df = DataFrame([[1, 2]])
res = df.mask(DataFrame([[True, False]]))
expec = DataFrame([[nan, 2]])
assert_frame_equal(res, expec)
def test_mask_callable(self):
# GH 12533
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.mask(lambda x: x > 4, lambda x: x + 1)
exp = DataFrame([[1, 2, 3], [4, 6, 7], [8, 9, 10]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.mask(df > 4, df + 1))
# return ndarray and scalar
result = df.mask(lambda x: (x % 2 == 0).values, lambda x: 99)
exp = DataFrame([[1, 99, 3], [99, 5, 99], [7, 99, 9]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.mask(df % 2 == 0, 99))
# chain
result = (df + 2).mask(lambda x: x > 8, lambda x: x + 10)
exp = DataFrame([[3, 4, 5], [6, 7, 8], [19, 20, 21]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result,
(df + 2).mask((df + 2) > 8, (df + 2) + 10))
def test_head_tail(self):
assert_frame_equal(self.frame.head(), self.frame[:5])
assert_frame_equal(self.frame.tail(), self.frame[-5:])
assert_frame_equal(self.frame.head(0), self.frame[0:0])
assert_frame_equal(self.frame.tail(0), self.frame[0:0])
assert_frame_equal(self.frame.head(-1), self.frame[:-1])
assert_frame_equal(self.frame.tail(-1), self.frame[1:])
assert_frame_equal(self.frame.head(1), self.frame[:1])
assert_frame_equal(self.frame.tail(1), self.frame[-1:])
# with a float index
df = self.frame.copy()
df.index = np.arange(len(self.frame)) + 0.1
assert_frame_equal(df.head(), df.iloc[:5])
assert_frame_equal(df.tail(), df.iloc[-5:])
assert_frame_equal(df.head(0), df[0:0])
assert_frame_equal(df.tail(0), df[0:0])
assert_frame_equal(df.head(-1), df.iloc[:-1])
assert_frame_equal(df.tail(-1), df.iloc[1:])
# test empty dataframe
empty_df = DataFrame()
assert_frame_equal(empty_df.tail(), empty_df)
assert_frame_equal(empty_df.head(), empty_df)
def test_type_error_multiindex(self):
# See gh-12218
df = DataFrame(columns=['i', 'c', 'x', 'y'],
data=[[0, 0, 1, 2], [1, 0, 3, 4],
[0, 1, 1, 2], [1, 1, 3, 4]])
dg = df.pivot_table(index='i', columns='c',
values=['x', 'y'])
with tm.assert_raises_regex(TypeError, "is an invalid key"):
str(dg[:, 0])
index = Index(range(2), name='i')
columns = MultiIndex(levels=[['x', 'y'], [0, 1]],
labels=[[0, 1], [0, 0]],
names=[None, 'c'])
expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)
result = dg.loc[:, (slice(None), 0)]
assert_frame_equal(result, expected)
name = ('x', 0)
index = Index(range(2), name='i')
expected = Series([1, 3], index=index, name=name)
result = dg['x', 0]
assert_series_equal(result, expected)
def test_interval_index(self):
# GH 19977
index = pd.interval_range(start=0, periods=3)
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=index,
columns=['A', 'B', 'C'])
expected = 1
result = df.loc[0.5, 'A']
assert_almost_equal(result, expected)
index = pd.interval_range(start=0, periods=3, closed='both')
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=index,
columns=['A', 'B', 'C'])
index_exp = pd.interval_range(start=0, periods=2,
freq=1, closed='both')
expected = pd.Series([1, 4], index=index_exp, name='A')
result = df.loc[1, 'A']
assert_series_equal(result, expected)
class TestDataFrameIndexingDatetimeWithTZ(TestData):
def setup_method(self, method):
self.idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
name='foo')
self.dr = date_range('20130110', periods=3)
self.df = DataFrame({'A': self.idx, 'B': self.dr})
def test_setitem(self):
df = self.df
idx = self.idx
# setitem
df['C'] = idx
assert_series_equal(df['C'], Series(idx, name='C'))
df['D'] = 'foo'
df['D'] = idx
assert_series_equal(df['D'], Series(idx, name='D'))
del df['D']
# assert that A & C are not sharing the same base (e.g. they
# are copies)
b1 = df._data.blocks[1]
b2 = df._data.blocks[2]
assert b1.values.equals(b2.values)
assert id(b1.values.values.base) != id(b2.values.values.base)
# with nan
df2 = df.copy()
df2.iloc[1, 1] = pd.NaT
df2.iloc[1, 2] = pd.NaT
result = df2['B']
assert_series_equal(notna(result), Series(
[True, False, True], name='B'))
assert_series_equal(df2.dtypes, df.dtypes)
def test_set_reset(self):
idx = self.idx
# set/reset
df = DataFrame({'A': [0, 1, 2]}, index=idx)
result = df.reset_index()
assert result['foo'].dtype == 'datetime64[ns, US/Eastern]'
df = result.set_index('foo')
tm.assert_index_equal(df.index, idx)
def test_transpose(self):
result = self.df.T
expected = DataFrame(self.df.values.T)
expected.index = ['A', 'B']
assert_frame_equal(result, expected)
def test_scalar_assignment(self):
# issue #19843
df = pd.DataFrame(index=(0, 1, 2))
df['now'] = pd.Timestamp('20130101', tz='UTC')
expected = pd.DataFrame(
{'now': pd.Timestamp('20130101', tz='UTC')}, index=[0, 1, 2])
tm.assert_frame_equal(df, expected)
class TestDataFrameIndexingUInt64(TestData):
def setup_method(self, method):
self.ir = Index(np.arange(3), dtype=np.uint64)
self.idx = Index([2**63, 2**63 + 5, 2**63 + 10], name='foo')
self.df = DataFrame({'A': self.idx, 'B': self.ir})
def test_setitem(self):
df = self.df
idx = self.idx
# setitem
df['C'] = idx
assert_series_equal(df['C'], Series(idx, name='C'))
df['D'] = 'foo'
df['D'] = idx
assert_series_equal(df['D'], Series(idx, name='D'))
del df['D']
# With NaN: because uint64 has no NaN element,
# the column should be cast to object.
df2 = df.copy()
df2.iloc[1, 1] = pd.NaT
df2.iloc[1, 2] = pd.NaT
result = df2['B']
assert_series_equal(notna(result), Series(
[True, False, True], name='B'))
assert_series_equal(df2.dtypes, Series([np.dtype('uint64'),
np.dtype('O'), np.dtype('O')],
index=['A', 'B', 'C']))
def test_set_reset(self):
idx = self.idx
# set/reset
df = DataFrame({'A': [0, 1, 2]}, index=idx)
result = df.reset_index()
assert result['foo'].dtype == np.dtype('uint64')
df = result.set_index('foo')
tm.assert_index_equal(df.index, idx)
def test_transpose(self):
result = self.df.T
expected = DataFrame(self.df.values.T)
expected.index = ['A', 'B']
assert_frame_equal(result, expected)
class TestDataFrameIndexingCategorical(object):
def test_assignment(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = Categorical(["{0} - {1}".format(i, i + 499)
for i in range(0, 10000, 500)])
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), CategoricalDtype(categories=labels,
ordered=False)],
index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'),
CategoricalDtype(categories=labels, ordered=False),
CategoricalDtype(categories=labels, ordered=False)],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
tm.assert_categorical_equal(result1._data._block.values, d)
# sorting
s.name = 'E'
tm.assert_series_equal(result2.sort_index(), s.sort_index())
cat = Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = DataFrame(Series(cat))
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = Categorical(["a", "a", "a", "a", "a", "a", "a"],
categories=["a", "b"])
idx = Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"],
categories=["a", "b"])
idx1 = Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = DataFrame({"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"],
categories=["a", "b"])
idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = DataFrame({"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = DataFrame({"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = DataFrame({"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
pytest.raises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
pytest.raises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
pytest.raises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = Categorical(list('bb'), categories=list('abc'))
with pytest.raises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = Categorical(list('cc'), categories=list('abc'))
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", "cats"] = "c"
pytest.raises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
pytest.raises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
pytest.raises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(
["b", "b"], categories=["a", "b", "c"])
with pytest.raises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# loc with a positional column lookup (df.columns[0])
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", df.columns[0]] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", df.columns[0]] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", df.columns[0]] = "c"
pytest.raises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
pytest.raises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
pytest.raises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(
["b", "b"], categories=["a", "b", "c"])
with pytest.raises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", df.columns[0]] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
df.loc["j":"k", df.columns[0]] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2, 0] = "c"
pytest.raises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
pytest.raises(ValueError, f)
# fancy indexing
catsf = Categorical(["a", "a", "c", "c", "a", "a", "a"],
categories=["a", "b", "c"])
idxf = Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
# category c is kept in .categories
tm.assert_frame_equal(df, exp_fancy)
# at (originally exercised via the deprecated set_value API)
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
pytest.raises(ValueError, f)
# Assigning a Categorical to parts of an int/... column uses the values
# of the Categorical
df = DataFrame({"a": [1, 1, 1, 1, 1], "b": list("aaaaa")})
exp = DataFrame({"a": [1, "b", "b", 1, 1], "b": list("aabba")})
df.loc[1:2, "a"] = Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
def test_functions_no_warnings(self):
df = DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
|
bsd-3-clause
|
stefanseefeld/numba
|
examples/mandel/mandel_vectorize.py
|
3
|
1448
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import sys
from numba import vectorize
import numpy as np
from timeit import default_timer as timer
from matplotlib.pylab import imshow, jet, show, ion
sig = 'uint8(uint32, f4, f4, f4, f4, uint32, uint32, uint32)'
@vectorize([sig], target='cuda')
def mandel(tid, min_x, max_x, min_y, max_y, width, height, iters):
pixel_size_x = (max_x - min_x) / width
pixel_size_y = (max_y - min_y) / height
x = tid % width
y = tid // width  # integer row index (true division is in effect via __future__)
real = min_x + x * pixel_size_x
imag = min_y + y * pixel_size_y
c = complex(real, imag)
z = 0.0j
for i in range(iters):
z = z * z + c
if (z.real * z.real + z.imag * z.imag) >= 4:
return i
return 255
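# Editor's illustrative sketch (not part of the original example): the kernel
# above is compiled with target='cuda' and therefore needs an NVIDIA GPU and a
# working CUDA toolkit. Assuming only the documented numba @vectorize targets,
# the same element-wise function can be compiled for the CPU by switching the
# target. 'mandel_cpu' is a hypothetical name used only for this illustration;
# it reuses the 'sig' and 'vectorize' names defined above.
@vectorize([sig], target='parallel')  # target='cpu' would give a serial build
def mandel_cpu(tid, min_x, max_x, min_y, max_y, width, height, iters):
    # identical mapping from a linear thread id to a pixel coordinate
    pixel_size_x = (max_x - min_x) / width
    pixel_size_y = (max_y - min_y) / height
    x = tid % width
    y = tid // width
    real = min_x + x * pixel_size_x
    imag = min_y + y * pixel_size_y
    c = complex(real, imag)
    z = 0.0j
    for i in range(iters):
        z = z * z + c
        if (z.real * z.real + z.imag * z.imag) >= 4:
            return i
    return 255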
def create_fractal(min_x, max_x, min_y, max_y, width, height, iters):
tids = np.arange(width * height, dtype=np.uint32)
return mandel(tids, np.float32(min_x), np.float32(max_x), np.float32(min_y),
np.float32(max_y), np.uint32(height), np.uint32(width),
np.uint32(iters))
def main():
width = 500 * 10
height = 750 * 10
ts = timer()
pixels = create_fractal(-2.0, 1.0, -1.0, 1.0, width, height, 20)
te = timer()
print('time: %f' % (te - ts))
image = pixels.reshape(width, height)
#print(image)
imshow(image)
show()
if __name__ == '__main__':
main()
|
bsd-2-clause
|
iamharshit/ML_works
|
Movie Review Analyser/KaggleWord2VecUtility.py
|
2
|
2087
|
import re
import nltk
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
class KaggleWord2VecUtility(object):
"""KaggleWord2VecUtility is a utility class for processing raw HTML text into segments for further learning"""
@staticmethod
def review_to_wordlist( review, remove_stopwords=False ):
# Function to convert a document to a sequence of words,
# optionally removing stop words. Returns a list of words.
#
# 1. Remove HTML
review_text = BeautifulSoup(review, 'lxml').get_text()
#
# 2. Remove non-letters
review_text = re.sub("[^a-zA-Z]"," ", review_text)
#
# 3. Convert words to lower case and split them
words = review_text.lower().split()
#
# 4. Optionally remove stop words (false by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
#
# 5. Return a list of words
return(words)
# Define a function to split a review into parsed sentences
@staticmethod
def review_to_sentences( review, tokenizer, remove_stopwords=False ):
# Function to split a review into parsed sentences. Returns a
# list of sentences, where each sentence is a list of words
#
# 1. Use the NLTK tokenizer to split the paragraph into sentences
raw_sentences = tokenizer.tokenize(review.decode('utf8').strip())
#
# 2. Loop over each sentence
sentences = []
for raw_sentence in raw_sentences:
# If a sentence is empty, skip it
if len(raw_sentence) > 0:
# Otherwise, call review_to_wordlist to get a list of words
sentences.append( KaggleWord2VecUtility.review_to_wordlist( raw_sentence, \
remove_stopwords ))
#
# Return the list of sentences (each sentence is a list of words,
# so this returns a list of lists)
return sentences
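# Editor's illustrative usage sketch (not part of the original utility). It
# assumes the NLTK 'punkt' and 'stopwords' data have been downloaded via
# nltk.download(); the review text is made up purely for this example.
if __name__ == '__main__':
    import nltk.data
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    raw_review = b"This movie was great! The acting was superb."
    # review_to_wordlist expects decoded text; review_to_sentences decodes internally
    words = KaggleWord2VecUtility.review_to_wordlist(raw_review.decode('utf8'),
                                                     remove_stopwords=True)
    sentences = KaggleWord2VecUtility.review_to_sentences(raw_review, tokenizer)
    print(words)      # e.g. ['movie', 'great', 'acting', 'superb']
    print(sentences)  # a list of sentences, each a list of lower-cased words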
|
mit
|
AlchemicalChest/Gaussian-Process-with-Stochastic-Variational-Inference
|
GPSVI/test/playtoymoon.py
|
1
|
1736
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 21 22:27:54 2015
@author: Ziang
"""
import numpy as np
from sklearn import datasets
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from GPClassifier import GPClassifier
np.random.seed(0)
xdata, ydata = datasets.make_moons(n_samples=400, noise=0.1)
from sklearn.cross_validation import train_test_split
x_tr, x_te, y_tr, y_te = train_test_split(xdata, ydata, test_size=0.50)
colors = ['r','g','b']
plt.figure(2)
ax = plt.subplot(221)
ax.set_title('Original Testing Data')
ax.scatter(x_te[:,0], x_te[:,1], c=[colors[y] for y in y_te], s=40)
clf = GPClassifier(x_tr, y_tr, x_te, y_te, \
alpha=1.0, max_iter=200, num_inducing_points=40, \
learning_rate=0.01, verbose=3)
#clf.score(x_tr, y_tr, x_te, y_te)
clf.fit()
pd = clf.predict(x_te)
print('SVI error = {}'.format(np.sum(len(np.where(pd != y_te)[0])) / float(x_te.shape[0])))
plt.figure(2)
ax = plt.subplot(222)
ax.set_title('GP with Stochastic Variational Inference')
ax.scatter(x_te[:,0], x_te[:,1], c=[colors[y] for y in pd], s=40)
clf = SVC()
clf.fit(x_tr, y_tr)
pd = clf.predict(x_te)
print('SVM error = {}'.format(np.sum(len(np.where(pd != y_te)[0])) / float(x_te.shape[0])))
plt.figure(2)
ax = plt.subplot(223)
ax.set_title('SVM with RBF Kernel')
ax.scatter(x_te[:,0], x_te[:,1], c=[colors[y] for y in pd], s=40)
clf = LogisticRegression()
clf.fit(x_tr, y_tr)
pd = clf.predict(x_te)
print('Logistic error = {}'.format(np.sum(len(np.where(pd != y_te)[0])) / float(x_te.shape[0])))
plt.figure(2)
ax = plt.subplot(224)
ax.set_title('Logistic Regression')
ax.scatter(x_te[:,0], x_te[:,1], c=[colors[y] for y in pd], s=40)
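# Editor's note (illustrative, not in the original script): each of the error
# expressions above is simply the misclassification rate, which can be written
# more directly as the mean of a boolean mask. A minimal sketch:
import numpy as np  # already imported above; repeated so the sketch stands alone
def error_rate(y_pred, y_true):
    """Fraction of predictions that differ from the true labels."""
    return np.mean(np.asarray(y_pred) != np.asarray(y_true))
# e.g. print('SVI error = {}'.format(error_rate(pd, y_te)))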
|
mit
|
f3r/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
168
|
1793
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-tree variability.
As expected, the plot suggests that 3 features are informative, while the
remaining ones are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
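# Editor's sketch (not part of the original example): the ranking printed above
# can also be collected into a pandas Series for easier inspection. This assumes
# pandas is available in addition to the numpy/matplotlib imports already used.
import pandas as pd
ranking = pd.Series(importances, index=np.arange(X.shape[1])).sort_values(ascending=False)
print(ranking.head())  # top features by importance, largest first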
|
bsd-3-clause
|
detrout/debian-statsmodels
|
statsmodels/examples/ex_kernel_semilinear_dgp.py
|
33
|
4969
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 06 09:50:54 2013
Author: Josef Perktold
"""
from __future__ import print_function
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
#from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.kernel_extras as smke
import statsmodels.sandbox.nonparametric.dgp_examples as dgp
class UnivariateFunc1a(dgp.UnivariateFunc1):
def het_scale(self, x):
return 0.5
seed = np.random.randint(999999)
#seed = 430973
#seed = 47829
seed = 648456 #good seed for het_scale = 0.5
print(seed)
np.random.seed(seed)
nobs, k_vars = 300, 3
x = np.random.uniform(-2, 2, size=(nobs, k_vars))
xb = x.sum(1) / 3 #beta = [1,1,1]
k_vars_lin = 2
x2 = np.random.uniform(-2, 2, size=(nobs, k_vars_lin))
funcs = [#dgp.UnivariateFanGijbels1(),
#dgp.UnivariateFanGijbels2(),
#dgp.UnivariateFanGijbels1EU(),
#dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))
UnivariateFunc1a(x=xb)
]
res = []
fig = plt.figure()
for i,func in enumerate(funcs):
#f = func()
f = func
y = f.y + x2.sum(1)
model = smke.SemiLinear(y, x2, x, 'ccc', k_vars_lin)
mean, mfx = model.fit()
ax = fig.add_subplot(1, 1, i+1)
f.plot(ax=ax)
xb_est = np.dot(model.exog, model.b)
sortidx = np.argsort(xb_est) #f.x)
ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, label='est. mean')
# ax.plot(f.x, mean0, color='g', lw=2, label='est. mean')
ax.legend(loc='upper left')
res.append((model, mean, mfx))
print('beta', model.b)
print('scale - est', (y - (xb_est+mean)).std())
print('scale - dgp realised, true', (y - (f.y_true + x2.sum(1))).std(), \
2 * f.het_scale(1))
fittedvalues = xb_est + mean
resid = np.squeeze(model.endog) - fittedvalues
print('corrcoef(fittedvalues, resid)', np.corrcoef(fittedvalues, resid)[0,1])
print('variance of components, var and as fraction of var(y)')
print('fitted values', fittedvalues.var(), fittedvalues.var() / y.var())
print('linear ', xb_est.var(), xb_est.var() / y.var())
print('nonparametric', mean.var(), mean.var() / y.var())
print('residual ', resid.var(), resid.var() / y.var())
print('\ncovariance decomposition fraction of var(y)')
print(np.cov(fittedvalues, resid) / model.endog.var(ddof=1))
print('sum', (np.cov(fittedvalues, resid) / model.endog.var(ddof=1)).sum())
print('\ncovariance decomposition, xb, m, resid as fraction of var(y)')
print(np.cov(np.column_stack((xb_est, mean, resid)), rowvar=False) / model.endog.var(ddof=1))
fig.suptitle('Kernel Regression')
fig.show()
alpha = 0.7
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(f.x[sortidx], f.y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
sortidx = np.argsort(xb_est + mean)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(f.x[sortidx], y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(f.x[sortidx], (xb_est + mean)[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
ax.set_title('Semilinear Model - observed and total fitted')
fig = plt.figure()
# ax = fig.add_subplot(1, 2, 1)
# ax.plot(f.x, f.y, 'o', color='b', lw=2, alpha=alpha, label='observed')
# ax.plot(f.x, f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
# ax.plot(f.x, mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')
# ax.legend(loc='upper left')
sortidx0 = np.argsort(xb)
ax = fig.add_subplot(1, 2, 1)
ax.plot(f.y[sortidx0], 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.y_true[sortidx0], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(mean[sortidx0], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
ax.set_title('Single Index Model (sorted by true xb)')
ax = fig.add_subplot(1, 2, 2)
ax.plot(y - xb_est, 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
ax.set_title('Single Index Model (nonparametric)')
plt.figure()
plt.plot(y, xb_est+mean, '.')
plt.title('observed versus fitted values')
plt.show()
|
bsd-3-clause
|
uglyboxer/linear_neuron
|
net-p3/lib/python3.5/site-packages/sklearn/metrics/metrics.py
|
233
|
1262
|
import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
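# Minimal migration sketch for the deprecation warning above (illustrative values only):
# import each metric directly from sklearn.metrics instead of sklearn.metrics.metrics.
# from sklearn.metrics import accuracy_score
# accuracy_score([0, 1, 1, 0], [0, 1, 0, 0])  # -> 0.75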
|
mit
|
ocons/kNN-GP-model
|
knn-GP-model.py
|
1
|
2394
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from sklearn import neighbors
from sklearn import gaussian_process
# Import dataset
data = pd.read_csv('datasetA.csv', header=None)
data = data.as_matrix()
L1,L2 = data.shape
L2 = L2-1
ref = data[:,L2]
# Cut one period out of dataset
d1 = (ref==ref.max()).argmax() # index 360
d2 = L1-(ref[::-1]==ref.min()).argmax() # index 0
ref = data[d1:d2,L2]
D = data[d1:d2,0:L2]
L1,L2 = D.shape
# Plot data
fig = plt.figure()
ax = fig.gca(projection='3d')
X,Y = np.arange(0, L2, 1), np.arange(0, 360, 1)
X,Y = np.meshgrid(X, Y)
Z = D[0:L1:1000,:]
surf = ax.plot_surface(X,Y,Z,rstride=1,cstride=1,cmap=cm.hot,linewidth=0)
# K-NN regression model
X = D[0:L1:360,:]
y = ref[0:L1:360]
nn = 5 # number nearest neighbors
knn = neighbors.KNeighborsRegressor(nn, weights='uniform')
# Fit K-NN model
knn.fit(X, y);
# Two GP regression models: one in range 0-180, another in range 180-360
M = (ref==180).argmax()
target_1 = ref[0:M:180]
target_2 = ref[M::180]
trainD_1 = D[0:M:180,0:L2]
trainD_2 = D[M::180,0:L2]
gp1 = gaussian_process.GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1)
gp2 = gaussian_process.GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1)
# Fit GP models
gp1.fit(trainD_1, target_1);
gp2.fit(trainD_2, target_2);
# Prediction on 3600 images
x = D[0:L1:100,0:L2]
y_true = ref[0:L1:100]
x_class = (knn.predict(x)<180)+0
x1 = x[x_class==0,:]
x2 = x[x_class==1,:]
y_true1 = y_true[x_class==0]
y_true2 = y_true[x_class==1]
y_pred1, s2_pred1 = gp1.predict(x1, eval_MSE=True)
y_pred2, s2_pred2 = gp2.predict(x2, eval_MSE=True)
y_true = np.concatenate([y_true1,y_true2])
y_pred = np.concatenate([y_pred1,y_pred2])
# Mean absolute error (the square root of a squared difference is its absolute value)
acc = np.mean(np.sqrt((y_pred-y_true)**2))
print('Mean absolute error (3600 test images):',acc)
# Histogram (error)
vet = np.zeros(len(y_true))
for i in range(0,len(y_true)):
vet[i] = y_pred[i]-y_true[i]
plt.hist(vet, 3000)
axes = plt.gca()
axes.set_xlim([-1.0,1.0]);
plt.xlabel('error = (prediction - reference)')
plt.ylabel('counts')
# Plot predictions vs references
plt.figure()
plt.plot(y_true[0::],y_pred[0::],'r.')
plt.plot([0,360],[0,360],'k')
plt.ylabel('prediction')
plt.xlabel('reference')
axes = plt.gca()
axes.set_xlim([-20,360+20]);
axes.set_ylim([-20,360+20]);
|
mit
|
detrout/debian-statsmodels
|
statsmodels/datasets/copper/data.py
|
28
|
2316
|
"""World Copper Prices 1951-1975 dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = "World Copper Market 1951-1975 Dataset"
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """World Copper Market 1951-1975"""
DESCRLONG = """This data describes the world copper market from 1951 through 1975. In an
example, in Gill, the outcome variable (of a 2 stage estimation) is the world
consumption of copper for the 25 years. The explanatory variables are the
world consumption of copper in 1000 metric tons, the constant dollar adjusted
price of copper, the price of a substitute, aluminum, an index of real per
capita income base 1970, an annual measure of manufacturer inventory change,
and a time trend.
"""
NOTE = """
Number of Observations - 25
Number of Variables - 6
Variable name definitions::
WORLDCONSUMPTION - World consumption of copper (in 1000 metric tons)
COPPERPRICE - Constant dollar adjusted price of copper
INCOMEINDEX - An index of real per capita income (base 1970)
ALUMPRICE - The price of aluminum
INVENTORYINDEX - A measure of annual manufacturer inventory trend
TIME - A time trend
Years are included in the data file though not returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
    Load the copper data and return a Dataset class.
    Returns
    -------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/copper.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6))
return data
def load_pandas():
"""
    Load the copper data and return a Dataset class.
    Returns
    -------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
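# Minimal usage sketch (assuming this module is importable as
# statsmodels.datasets.copper in a standard statsmodels installation):
# from statsmodels.datasets import copper
# dataset = copper.load_pandas()
# dataset.endog.head()  # WORLDCONSUMPTION, per the NOTE above
# dataset.exog.head()   # remaining explanatory variables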
|
bsd-3-clause
|
0asa/scikit-learn
|
examples/linear_model/plot_lasso_coordinate_descent_path.py
|
254
|
2639
|
"""
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
alphacsc/alphacsc
|
examples/other/plot_simulate_swm.py
|
1
|
3342
|
"""
=====================
SWM on simulated data
=====================
This example shows how the sliding window method (SWM) [1]
works on simulated data. The code is adapted from the
`neurodsp package <https://github.com/voytekresearch/neurodsp/>`_
from Voytek lab. Note that, at present, it does not
implement parallel tempering.
[1] Gips, Bart, et al.
Discovering recurring patterns in electrophysiological recordings.
Journal of neuroscience methods 275 (2017): 66-79.
"""
# Authors: Scott Cole
# Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
###############################################################################
# Let us define the model parameters
n_times_atom = 64 # L
n_times = 5000 # T
n_trials = 10 # N
###############################################################################
# The algorithm does not naturally lend itself to multiple atoms. Therefore,
# we simulate only one atom.
n_atoms = 1 # K
###############################################################################
# A minimum spacing must be enforced between the windows that are averaged.
min_spacing = 200 # G
###############################################################################
# Now, we can simulate
from alphacsc import check_random_state
from alphacsc.simulate import simulate_data
random_state_simulate = 1
X, ds_true, z_true = simulate_data(n_trials, n_times, n_times_atom,
n_atoms, random_state_simulate,
constant_amplitude=True)
rng = check_random_state(random_state_simulate)
X += 0.01 * rng.randn(*X.shape)
###############################################################################
# We expect 10 occurrences of the atom in total.
# So, let us define 10 random locations for the algorithm to start with.
# If this number is not known, we will end up estimating more or fewer windows.
import numpy as np
window_starts = rng.choice(np.arange(n_trials * n_times), size=n_trials)
###############################################################################
# Now, we apply the SWM algorithm.
from alphacsc.other.swm import sliding_window_matching
random_state = 42
X = X.reshape(X.shape[0] * X.shape[1]) # expects 1D time series
d_hat, window_starts, J = sliding_window_matching(
X, L=n_times_atom, G=min_spacing, window_starts_custom=window_starts,
max_iterations=10000, T=0.01, random_state=random_state)
###############################################################################
# Let us look at the data in the time windows where the atom is found.
import matplotlib.pyplot as plt
fig, axes = plt.subplots(2, n_trials // 2, sharex=True, sharey=True,
figsize=(15, 3))
axes = axes.ravel()
for ax, w_start in zip(axes, window_starts):
ax.plot(X[w_start:w_start + n_times_atom])
###############################################################################
# It is not perfect, but it does find time windows where the atom
# is present. Now let us plot the estimated atom against the true one.
plt.figure()
plt.plot(d_hat / np.linalg.norm(d_hat))
plt.plot(ds_true.T, '--')
###############################################################################
# and the cost function over iterations
plt.figure()
plt.plot(J)
plt.ylabel('Cost function J')
plt.xlabel('Iteration #')
plt.show()
|
bsd-3-clause
|
stefangri/s_s_productions
|
PHY641/V27_Zeeman/Messdaten/auswertung_blau_pi/auswertung_blau.py
|
1
|
3693
|
import numpy as np
import uncertainties.unumpy as unp
from uncertainties import ufloat
from uncertainties.unumpy import nominal_values as noms
from uncertainties.unumpy import std_devs as stds
from uncertainties import correlated_values
import math
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from pint import UnitRegistry
import latex as l
r = l.Latexdocument('results.tex')
u = UnitRegistry()
Q_ = u.Quantity
#import pandas as pd
#from pandas import Series, DataFrame
import matplotlib.image as mpimg
import scipy.constants as const
#series = pd.Series(data, index=index)
# d = pd.DataFrame({'colomn': series})
# FIT FUNCTIONS
def hysterese(I, a, b, c, d):
return a * I**3 + b * I**2 + c * I + d
#######LOAD DATA#########
B_up, B_down = np.genfromtxt('../data/hysterese.txt', unpack=True) # flux density for increasing and decreasing coil current
I = np.linspace(0, 20, 21) # current from 0 to 20 A in 1 A steps
#l.Latexdocument('tabs/hysterese.tex').tabular(
#data = [I, B_up, B_down], #Data incl. unpuarray
#header = [r'I / \ampere', r'B\ua{auf} / \milli\tesla', r'B\ua{ab} / \milli\tesla'],
#places = [1, 0, 0],
#caption = r'Gemessene magnetische Flussdichten $B\ua{i}$ bei auf- bzw. absteigenden Strom $I$.',
#label = 'hysterese')
### FIT OF THE HYSTERESIS ###
params_up, cov_up = curve_fit(hysterese, I, B_up)
params_down, cov_down = curve_fit(hysterese, I, B_down)
params_up = correlated_values(params_up, cov_up)
params_down = correlated_values(params_down, cov_down)
# Calculations using the positions of the splitting
peaks_blau_0 = np.genfromtxt('../data/peaks_blau_pi_I_0.txt', unpack = True)
peaks_blau_17 = np.genfromtxt('../data/peaks_blau_pi_I_17.txt', unpack = True)
#l.Latexdocument('../tabs/peaks_blau_pi.tex').tabular(
#data = [peaks_blau_0, unp.uarray(peaks_blau_17[:10], peaks_blau_17[10:])],
#header = ['x_0 / px', 'x_{17} / px'],
#places = [0, (4.0, 4.0)],
#caption = r'Blaue Pi Aufspaltung: Positionen $x_0$ und $x_{17}$ der Intensitätsmaxima unter $I= \SI{0}{\ampere}$ und $I= \SI{17}{\ampere}$.',
#label = 'tab: peaks_blau_pi')
delta_s_blau = peaks_blau_0[1:] - peaks_blau_0[:-1]
del_s_blau = (peaks_blau_17[1:] - peaks_blau_17[:-1])[::2]
del_s_mid = [(del_s_blau[i] + del_s_blau[i+1])/2 for i in range(0, len(del_s_blau)-1)]
lambda_blau = Q_(480.0, 'nanometer').to('meter')
d = Q_(4, 'millimeter').to('meter')
c = Q_(const.c, 'meter / second')
h = Q_(const.h, 'joule * second')
mu_B = Q_(const.physical_constants['Bohr magneton'][0], 'joule / tesla')
n_blau = 1.41735
del_lambda_blau = (1/2 * lambda_blau**2 / (2 * d * np.sqrt(n_blau**2 - 1) ) * (del_s_mid / delta_s_blau)).to('picometer')
delta_E_blau = (h * c / lambda_blau**2 * del_lambda_blau).to('eV')
g_blau = (delta_E_blau / (mu_B * Q_(hysterese(17, *params_up), 'millitesla'))).to('dimensionless')
g_blau_mid = np.mean(g_blau)
g_blau = unp.uarray(noms(g_blau), stds(g_blau))
print(g_blau_mid)
print('B-Feldstärken: ', hysterese(0, *params_up), hysterese(17, *params_up))
#l.Latexdocument('../tabs/abstände_blau_pi.tex').tabular(
#data = [delta_s_blau, del_s_mid, del_lambda_blau.magnitude, delta_E_blau.magnitude*1e5, g_blau], #Data incl. unpuarray
#header = [r'\Delta s_i / px', r'\frac{\delta s_i + \delta s_{i+1}}{2} / px', r'\Delta \lambda / \pico\meter',
# r'\Delta E / 10^{-5}\electronvolt', 'g / '],
#places = [0, 0, 1, 1, (1.3, 1.3)],
#caption = r'Blaue Pi Aufspaltung: Abstände zwischen den unaufgespaltenen Linien $\Delta s_i$ und gemittelte Abstände \frac{\delta s_i + \delta s_{i+1}}{2}. Wellenlängenverschiebung $\Delta \lambda$, '
# + 'Energieaufspaltung $\Delta E$ und berechneter Übergangs-Landé-Faktor g.',
#label = 'abstände_blau_pi')
|
mit
|
voxlol/scikit-learn
|
sklearn/tests/test_common.py
|
127
|
7665
|
"""
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators that have a max_iter
    # attribute report an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
|
bsd-3-clause
|
CoffeRobot/fato
|
pose_estimation/src/points3d_demo.py
|
1
|
19840
|
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import warnings
import transformations as tf
import pose_estimation as pose_estimate
import utilities as ut
import kalman_pose as kpose
def randrange(n, vmin, vmax):
return (vmax - vmin)*np.random.rand(n) + vmin
def create_points(n):
xs = randrange(n, -5, 5)
ys = randrange(n, -5, 5)
zs = randrange(n, 49, 50)
points = np.zeros([4,n])
points[0,:] = xs
points[1,:] = ys
points[2,:] = zs
points[3,:] = 1
return points
def create_image_points(n,w,h):
xs = randrange(n, 0, w)
ys = randrange(n, 0, h)
zs = randrange(n, 0.49, 0.50)
points = np.zeros([3,n])
points[0,:] = xs
points[1,:] = ys
points[2,:] = 1
#points[3,:] = 1
return points, zs
def create_fixed_points(n,cx,cy):
xs = np.zeros(n*n)
ys = np.zeros(n*n)
zs = np.zeros(n*n)
points = np.ones([3,n*n])
for i in range(0,n):
for j in range(0,n):
val_x = cx + (i - n/2) * 3
val_y = cy + (j - n/2) * 3
points[0,i+j*n] = val_x
points[1,i+j*n] = val_y
zs = 0.5
return points,zs
def get_lq_data(prev_pts, next_pts, pts_d, camera):
X = np.empty((0, 6), float)
Y = np.empty((0, 1), float)
[r,c] = prev_pts.shape
if r < c and (r == 3 or r == 4):
warnings.warn('points must be in the form Nx3, transposing matrix')
prev_pts = prev_pts.T
next_pts = next_pts.T
num_pts = prev_pts.shape[0]
valid_pts = 0
fx = camera[0,0]
fy = camera[1,1]
cx = camera[0,2]
cy = camera[1,2]
focal = (fx + fy)/2.0
f_x = focal
f_y = focal
for i in range(0, num_pts):
prev_pt = prev_pts[i]
next_pt = next_pts[i]
mz = pts_d[i]
x = next_pt[0] - cx
y = next_pt[1] - cy
xv = next_pt[0] - prev_pt[0]
yv = next_pt[1] - prev_pt[1]
        # the depth mz is assumed known; it must be valid (not NaN) and positive
        if not np.isnan(mz) and mz > 0:
valid_pts += 1
eq_x = np.zeros([1,6])
eq_y = np.zeros([1,6])
eq_x[0,0] = focal / mz
eq_x[0,1] = 0
eq_x[0,2] = -x/mz
eq_x[0,3] = - x * y / focal
eq_x[0,4] = focal + (x * x) / focal
eq_x[0,5] = -y
eq_y[0,0] = 0
eq_y[0,1] = focal / mz
eq_y[0,2] = -y/mz
eq_y[0,3] = -focal + (y*y) / focal
eq_y[0,4] = x * y / focal
eq_y[0,5] = x
X = np.append(X, eq_x, axis=0)
X = np.append(X, eq_y, axis=0)
Y = np.append(Y, xv)
Y = np.append(Y, yv)
return X,Y
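def solve_motion_lstsq(X, Y):
    # Minimal sketch (not used below): the rows returned by get_lq_data form a
    # linear system X beta = Y in the 6-dof motion beta = [vx, vy, vz, wx, wy, wz];
    # an ordinary least-squares solve recovers it. The demo itself uses
    # pose_estimate.least_square / m_estimator for this step.
    beta, _, _, _ = np.linalg.lstsq(X, Y, rcond=None)
    return beta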
def get_camera_matrix():
fx = 649.6468505859375
fy = 649.00091552734375
cx = 322.32084374845363
cy = 221.2580892472088
return np.array([[fx, 0, cx, 0], [0, fy, cy, 0], [0, 0, 1, 0]])
class test_data:
X = []
Y = []
X_n = []
Y_n = []
ls_beta = []
ls_pose = []
me_beta = []
me_pose = []
ransac_beta = []
ransac_pose = []
ls_noise_beta = []
ls_noise_pose = []
me_noise_beta = []
me_noise_pose = []
ransac_noise_beta = []
ransac_noise_pose = []
kf_pose_ls = kpose.init_filter(1)
kf_pose_me = kpose.init_filter(1)
kf_pose_ran = kpose.init_filter(1)
fx = 649.6468505859375
fy = 649.00091552734375
cx = 322.32084374845363
cy = 221.2580892472088
init_pts_2d = []
init_pts_3d = []
prev_pts_2d = []
prev_pts_3d = []
next_pts_2d = []
next_pts_3d = []
x_flow = []
y_flow = []
prev_noise_2d = []
prev_noise_3d = []
next_noise_2d = []
next_noise_3d = []
x_flow_noise = []
y_flow_noise = []
gt_position = []
ls_position = []
m_position = []
r_position = []
ls_position_noise = []
m_position_noise = []
r_position_noise = []
pose_ls = []
pose_m = []
pose_gt = []
camera = []
projection = []
d_pts = []
add_noise = False
noise_percentage = .2
noise_mu = 0
noise_sigma = 1.5
ls_errors = np.empty((0, 1), float)
m_errors = np.empty((0, 1), float)
r_errors = np.empty((0, 1), float)
ls_noise_errors = np.empty((0, 1), float)
m_noise_errors = np.empty((0, 1), float)
r_noise_errors = np.empty((0, 1), float)
m_iters = 10
ransac_iters = 5
def gen_data(self, num_points):
cam = np.array([[self.fx, 0, self.cx, 0], [0, self.fy, self.cy, 0], [0, 0, 1, 0]])
self.camera = cam
camera_inv = np.linalg.inv(cam[0:3,0:3])
[self.init_pts_2d, pts_d] = create_image_points(num_points, 640, 480)
self.init_pts_3d = np.dot(camera_inv, self.init_pts_2d[0:3,:]) * pts_d
self.init_pts_3d = np.vstack([self.init_pts_3d, np.ones(num_points)])
self.ls_position = tf.get_projection_matrix(np.array([0,0,0,0,0,0]))
self.ls_position_noise = tf.get_projection_matrix(np.array([0,0,0,0,0,0]))
self.m_position = tf.get_projection_matrix(np.array([0,0,0,0,0,0]))
self.m_position_noise = tf.get_projection_matrix(np.array([0,0,0,0,0,0]))
self.r_position = tf.get_projection_matrix(np.array([0,0,0,0,0,0]))
self.r_position_noise = tf.get_projection_matrix(np.array([0,0,0,0,0,0]))
self.prev_pts_2d = self.init_pts_2d.copy()
self.prev_pts_3d = self.init_pts_3d.copy()
self.prev_noise_2d = self.init_pts_2d.copy()
self.prev_noise_3d = self.init_pts_3d.copy()
def get_fixed_data(self, num_pts):
cam = np.array([[self.fx, 0, self.cx, 0], [0, self.fy, self.cy, 0], [0, 0, 1, 0]])
self.camera = cam
camera_inv = np.linalg.inv(cam[0:3,0:3])
[self.init_pts_2d, pts_d] = create_fixed_points(num_pts, self.cx, self.cy)
num_points = self.init_pts_2d.shape[1]
self.init_pts_3d = np.dot(camera_inv, self.init_pts_2d[0:3,:]) * pts_d
self.init_pts_3d = np.vstack([self.init_pts_3d, np.ones(num_points)])
self.ls_position = tf.get_projection_matrix(np.array([0,0,0,0,0,0]))
self.ls_position_noise = tf.get_projection_matrix(np.array([0,0,0,0,0,0]))
self.m_position = tf.get_projection_matrix(np.array([0,0,0,0,0,0]))
self.m_position_noise = tf.get_projection_matrix(np.array([0,0,0,0,0,0]))
self.r_position = tf.get_projection_matrix(np.array([0,0,0,0,0,0]))
self.r_position_noise = tf.get_projection_matrix(np.array([0,0,0,0,0,0]))
self.prev_pts_2d = self.init_pts_2d.copy()
self.prev_pts_3d = self.init_pts_3d.copy()
self.prev_noise_2d = self.init_pts_2d.copy()
self.prev_noise_3d = self.init_pts_3d.copy()
def move_points(self,tx,ty,tz,rx,ry,rz):
self.projection = tf.get_projection_matrix(np.array([tx,ty,tz,rx,ry,rz]))
self.next_pts_3d = np.dot(self.projection, self.prev_pts_3d)
self.next_pts_2d = self.camera.dot(self.next_pts_3d)
self.next_pts_2d = self.next_pts_2d / self.next_pts_3d[2,:]
self.next_noise_2d = self.next_pts_2d.copy()
# print('prev')
# print(ut.to_string(self.prev_pts_2d))
# print('next')
# print(ut.to_string(self.next_pts_2d))
self.x_flow = self.next_pts_2d[0,:] - self.prev_pts_2d[0,:]
self.y_flow = self.next_pts_2d[1,:] - self.prev_pts_2d[1,:]
self.d_pts = self.next_pts_3d[2,:]
self.x_flow_noise = self.x_flow.copy()
self.y_flow_noise = self.y_flow.copy()
num_points = self.next_pts_3d.shape[1]
ids = np.arange(num_points)
np.random.shuffle(ids)
num_elems = int(num_points * self.noise_percentage)
elems = ids[0:num_elems]
x_noise = np.random.normal(self.noise_mu, self.noise_sigma, num_elems)
y_noise = np.random.normal(self.noise_mu, self.noise_sigma, num_elems)
for i in range(0,num_elems):
id = elems[i]
self.x_flow_noise[id] += x_noise[i]
self.y_flow_noise[id] += y_noise[i]
self.next_noise_2d[0,id] += x_noise[i]
self.next_noise_2d[1,id] += y_noise[i]
#[X,Y] = get_lq_data(self.prev_pts_2d, self.next_pts_2d, self.d_pts, self.camera)
[X,Y] = pose_estimate.get_normal_equations(self.prev_pts_2d.T, self.next_pts_2d.T, self.d_pts, self.camera)
[self.X_n, self.Y_n] = pose_estimate.get_normal_equations(self.prev_noise_2d.T, self.next_noise_2d.T, self.d_pts, self.camera)
self.X = X
self.Y = Y
self.calculate_ls(10, 10)
self.calculate_distance()
self.prev_pts_2d = self.next_pts_2d.copy()
self.prev_pts_3d = self.next_pts_3d.copy()
self.prev_noise_2d = self.next_noise_2d.copy()
self.prev_noise_3d = self.next_pts_3d.copy()
#self.plot_data()
def calculate_ls(self,iters, ransac_iters):
self.ls_beta = pose_estimate.least_square(self.X, self.Y)
self.ls_pose = tf.get_projection_matrix(self.ls_beta)
self.ls_position = self.ls_pose.dot(self.ls_position)
self.me_beta = pose_estimate.m_estimator(self.X, self.Y, iters)
self.me_pose = tf.get_projection_matrix(self.me_beta)
self.m_position = self.me_pose.dot(self.m_position)
init_beta = pose_estimate.ransac_ls(self.X,self.Y,ransac_iters,3)
self.ransac_beta = pose_estimate.m_estimator(self.X, self.Y, iters, init_beta)
self.ransac_pose = tf.get_projection_matrix(self.ransac_beta)
self.r_position = self.ransac_pose.dot(self.r_position)
self.ls_noise_beta = pose_estimate.least_square(self.X_n, self.Y_n)
self.ls_noise_pose = tf.get_projection_matrix(self.ls_noise_beta)
self.ls_position_noise = self.ls_noise_pose.dot(self.ls_position_noise)
self.me_noise_beta = pose_estimate.m_estimator(self.X_n, self.Y_n, iters)
self.me_noise_pose = tf.get_projection_matrix(self.me_noise_beta)
self.m_position_noise = self.me_noise_pose.dot(self.m_position_noise)
# init_beta = pose_estimate.ransac_ls(self.X,self.Y,50,3)
# self.ransac_noise_beta = pose_estimate.m_estimator(self.X_n, self.Y_n, iters, init_beta)
# self.ransac_noise_pose = tf.get_projection_matrix(self.ransac_noise_beta)
# self.r_position_noise = self.ransac_noise_pose.dot(self.r_position_noise)
bp = tf.pose_to_beta(self.me_pose)
self.kf_pose_ls.predict()
self.kf_pose_ls.update(bp.T)
print('-----')
print(ut.to_string(bp))
print(ut.to_string(self.kf_pose_ls.x.T))
print('-----')
# self.kf_pose_me.predict()
# self.kf_pose_me.update(self.me_noise_beta)
def print_poses(self):
print('gt pose\n' + ut.to_string(self.projection))
#print('ls pose\n' + ut.to_string(self.ls_pose))
print('m pose\n' + ut.to_string(self.me_pose))
#fprint('r pose\n' + ut.to_string(self.ransac_pose))
k_pose = self.kf_pose_ls.x
tmp = np.array([k_pose[0,0],k_pose[1,0],k_pose[2,0],k_pose[9,0],k_pose[10,0],k_pose[11,0]])
k_pose = tf.get_projection_matrix(tmp)
print(ut.to_string(self.kf_pose_ls.x.T))
print('k pose\n' + ut.to_string(k_pose))
def plot_data(self):
fig = plt.figure()
l1 = plt.plot(self.ls_errors,color='r',label="lsq")
l2 =plt.plot(self.m_errors,color='b',label='me')
l3 =plt.plot(self.r_errors,color='g',label='ran')
l4 =plt.plot(self.ls_noise_errors,'r--',label='lsq_n')
l5 =plt.plot(self.m_noise_errors,'b--',label='me_n')
l6 =plt.plot(self.r_noise_errors,'g--',label='ran_n')
plt.legend()
plt.show()
# ax = fig.add_subplot(111, projection='3d')
#
# pts = self.init_pts_3d
# proj_points = self.next_pts_3d
#
# ls_pts = np.dot(self.pose_ls,pts)
# m_pts = np.dot(self.pose_m,pts)
#
# ax.scatter(pts[0,:],pts[1,:],pts[2,:], c='r', marker='o')
# ax.scatter(proj_points[0,:],proj_points[1,:],proj_points[2,:], c='b', marker='^')
# ax.scatter(ls_pts[0,:],ls_pts[1,:],ls_pts[2,:], c='c', marker='x')
# ax.scatter(m_pts[0,:],m_pts[1,:],m_pts[2,:], c='y', marker='.')
# ax.set_xlabel('X Label')
# ax.set_ylabel('Y Label')
# ax.set_zlabel('Z Label')
#
#plt.show()
def calculate_distance(self):
pts = self.init_pts_3d
proj_pts = self.next_pts_3d
ls_pts = np.dot(self.ls_position, pts)
m_pts = np.dot(self.m_position, pts)
r_pts = np.dot(self.r_position, pts)
ls_pts_noise = np.dot(self.ls_position_noise, pts)
m_pts_noise = np.dot(self.m_position_noise, pts)
r_pts_noise = np.dot(self.r_position_noise, pts)
dist_ls = np.linalg.norm(ls_pts - proj_pts)
dist_m = np.linalg.norm(m_pts - proj_pts)
dist_r = np.linalg.norm(r_pts - proj_pts)
dist_m_noise = np.linalg.norm(m_pts_noise - proj_pts)
dist_ls_noise = np.linalg.norm(ls_pts_noise - proj_pts)
dist_r_noise = np.linalg.norm(r_pts_noise - proj_pts)
self.ls_errors = np.append(self.ls_errors , dist_ls)
self.m_errors = np.append(self.m_errors, dist_m)
self.r_errors = np.append(self.r_errors , dist_r)
self.m_noise_errors = np.append(self.m_noise_errors, dist_m_noise)
self.ls_noise_errors = np.append(self.ls_noise_errors, dist_ls_noise)
self.r_noise_errors = np.append(self.r_noise_errors, dist_r_noise)
#print(np.mean(pts - proj_pts,1))
#print(np.mean(ls_pts - proj_pts,1))
#print(np.mean(m_pts - proj_pts,1))
def add_noise_to_data(self, percentage):
self.add_noise = True
self.noise_percentage = percentage
def plot_errors(ls_err, m_err, r_err, lsn_err, mn_err, rn_err):
fig = plt.figure()
plt.subplot(131)
l1 = plt.plot(ls_err[0,:],color='r',label="lsq")
l2 =plt.plot(m_err[0,:],color='b',label='me')
l3 =plt.plot(r_err[0,:],color='g',label='ran')
l4 =plt.plot(lsn_err[0,:],'r--',label='lsq_n')
l5 =plt.plot(mn_err[0,:],'b--',label='me_n')
l6 =plt.plot(rn_err[0,:],'g--',label='ran_n')
plt.subplot(132)
l1 = plt.plot(ls_err[1,:],color='r',label="lsq")
l2 =plt.plot(m_err[1,:],color='b',label='me')
l3 =plt.plot(r_err[1,:],color='g',label='ran')
l4 =plt.plot(lsn_err[1,:],'r--',label='lsq_n')
l5 =plt.plot(mn_err[1,:],'b--',label='me_n')
l6 =plt.plot(rn_err[1,:],'g--',label='ran_n')
plt.subplot(133)
l1 = plt.plot(ls_err[2,:],color='r',label="lsq")
l2 =plt.plot(m_err[2,:],color='b',label='me')
l3 =plt.plot(r_err[2,:],color='g',label='ran')
l4 =plt.plot(lsn_err[2,:],'r--',label='lsq_n')
l5 =plt.plot(mn_err[2,:],'b--',label='me_n')
l6 =plt.plot(rn_err[2,:],'g--',label='ran_n')
plt.legend()
plt.show()
fig = plt.figure()
plt.subplot(131)
l1 = plt.plot(ls_err[3,:],color='r',label="lsq")
l2 =plt.plot(m_err[3,:],color='b',label='me')
l3 =plt.plot(r_err[3,:],color='g',label='ran')
l4 =plt.plot(lsn_err[3,:],'r--',label='lsq_n')
l5 =plt.plot(mn_err[3,:],'b--',label='me_n')
l6 =plt.plot(rn_err[3,:],'g--',label='ran_n')
plt.subplot(132)
l1 = plt.plot(ls_err[4,:],color='r',label="lsq")
l2 =plt.plot(m_err[4,:],color='b',label='me')
l3 =plt.plot(r_err[4,:],color='g',label='ran')
l4 =plt.plot(lsn_err[4,:],'r--',label='lsq_n')
l5 =plt.plot(mn_err[4,:],'b--',label='me_n')
l6 =plt.plot(rn_err[4,:],'g--',label='ran_n')
plt.subplot(133)
l1 = plt.plot(ls_err[5,:],color='r',label="lsq")
l2 =plt.plot(m_err[5,:],color='b',label='me')
l3 =plt.plot(r_err[5,:],color='g',label='ran')
l4 =plt.plot(lsn_err[5,:],'r--',label='lsq_n')
l5 =plt.plot(mn_err[5,:],'b--',label='me_n')
l6 =plt.plot(rn_err[5,:],'g--',label='ran_n')
plt.legend()
plt.show()
def run_esperiment():
data = test_data()
data.fx = 600
data.fy = 600
data.cx = 320
data.cy = 240
#data.gen_data(30)
data.get_fixed_data(5)
angle = np.deg2rad(2)
gt_tr = np.array([0.02,0.00,0.00,0,0,0])
num_iterations = 3
ls_errors = np.zeros([6,10])
m_errors = np.zeros([6,10])
r_errors = np.zeros([6,10])
lsn_errors = np.zeros([6,10])
mn_errors = np.zeros([6,10])
rn_errors = np.zeros([6,10])
for i in range(0,num_iterations):
data.move_points(gt_tr[0],gt_tr[1],gt_tr[2],gt_tr[3],gt_tr[4],gt_tr[5])
ls_errors[:,i] = abs(gt_tr - data.ls_beta)
m_errors[:,i] = abs(gt_tr - data.me_beta)
r_errors[:,i] = abs(gt_tr - data.ransac_beta)
lsn_errors[:,i] = abs(gt_tr - data.ls_noise_beta)
mn_errors[:,i] = abs(gt_tr - data.me_noise_beta)
#rn_errors[:,i] = abs(gt_tr - data.ransac_noise_beta)
precision = 5
# print('gt ' + ut.to_string(gt_tr))
# print('lsq ' + ut.to_string(data.ls_beta,precision))
# print('m ' + ut.to_string(data.me_beta,precision))
# print('r ' + ut.to_string(data.ransac_beta,precision))
# print('lssn ' + ut.to_string(data.ls_noise_beta,precision))
# print('mn ' + ut.to_string(data.me_noise_beta,precision))
# #print('rn ' + ut.to_string(data.ransac_noise_beta,precision))
# print('\n')
data.print_poses()
#plot_errors(ls_errors, m_errors, r_errors, lsn_errors, mn_errors, rn_errors)
return data
def run_esperiment_dummy():
# coefficients of the model
a1, a2, a3 = 0.1, -0.2, 4.0
# ground truth
A_gt = [a1, a2, a3]
#print 'A_gt = ', A_gt
# create a coordinate matrix
nx = np.linspace(-1, 1, 41)
ny = np.linspace(-1, 1, 41)
x, y = np.meshgrid(nx, ny)
# make the estimation
z = a1*x + a2*y + a3
# let's add some gaussian noise
z_noise = z + 0.1*np.random.standard_normal(z.shape)
x_fl = x.flatten()
y_fl = y.flatten()
z_ones = np.ones([x.size,1])
X = np.hstack((np.reshape(x_fl, ([len(x_fl),1])), np.reshape(y_fl, ([len(y_fl),1])), z_ones))
Z = np.zeros(z_noise.shape)
Z[:] = z_noise
Z_fl = Z.flatten()
Z = np.reshape(Z_fl, ([len(Z_fl),1]))
# create outliers
outlier_prop = 0.3
outlier_IND = np.random.permutation(x.size)
    outlier_IND = outlier_IND[0:int(np.floor(x.size * outlier_prop))]
z_noise_outlier = np.zeros(z_noise.shape)
z_noise_outlier[:] = z_noise
z_noise_outlier_flt = z_noise_outlier.flatten()
z_noise_outlier_flt[outlier_IND] = z_noise_outlier_flt[outlier_IND] + 10*np.random.standard_normal(z_noise_outlier_flt[outlier_IND].shape)
z_noise_outlier = np.reshape(z_noise_outlier_flt, z.shape)
# non-robust least squares estimation
Z = np.zeros(z_noise_outlier.shape)
Z[:] = z_noise_outlier
Z_fl = Z.flatten()
Z = np.reshape(Z_fl, ([len(Z_fl),1]))
beta = pose_estimate.least_square(X,Z)
beta_m = pose_estimate.m_estimator(X,Z,5)
beta_r = pose_estimate.ransac_ls(X,Z,10,5)
beta_r = pose_estimate.m_estimator(X,Z,5,beta_r)
z_lsq_outlier = np.dot(X, beta)
z_lsq_outlier = np.reshape(z_lsq_outlier, z.shape)
z_m_outlier = np.dot(X, beta_m)
z_m_outlier = np.reshape(z_m_outlier, z.shape)
z_r_outlier = np.dot(X, beta_r)
z_r_outlier = np.reshape(z_r_outlier, z.shape)
lsq_non_robust_outlier = np.hstack((z, z_noise_outlier, z_lsq_outlier, z_m_outlier, z_r_outlier))
plt.figure()
plt.title('Non-robust estimate (corrupted by noise AND outliers)')
plt.imshow(lsq_non_robust_outlier)
plt.clim(z.min(), z.max())
plt.show()
#run_esperiment()
# ransac_ls(data.X,data.Y,5,3)
#
data = test_data()
data.get_fixed_data(2)
data.move_points(0.00,0,0,0,np.deg2rad(1),0)
#run_esperiment_dummy()
|
bsd-3-clause
|
AndKe/MAVProxy
|
MAVProxy/modules/lib/MacOS/backend_wxagg.py
|
7
|
5884
|
from __future__ import division, print_function
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
import backend_wx # already uses wxversion.ensureMinimal('2.8')
from backend_wx import FigureManager, FigureManagerWx, FigureCanvasWx, \
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, error_msg_wx, \
draw_if_interactive, show, Toolbar, backend_version
import wx
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.BeginDrawing()
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.EndDrawing()
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
        # artist contains methods that rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
frame = FigureFrameWxAgg(num, figure)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
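# Minimal usage sketch (assumes a wx app is running, as set up by
# backend_wx._create_wx_app above; the figure number is arbitrary):
# manager = new_figure_manager(1)
# manager.canvas.figure.gca().plot([0, 1], [0, 1])
# manager.canvas.draw()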
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba())
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.bounds
r = l + width
t = b + height
srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba())
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
|
gpl-3.0
|
jmetzen/scikit-learn
|
examples/classification/plot_lda_qda.py
|
29
|
4952
|
"""
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis(store_covariances=True)
y_pred = qda.fit(X, y).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
|
bsd-3-clause
|
cuiwei0322/cost_analysis
|
tall_building_zero_attack_angle_cost_analysis/Result/plot_cost_std.py
|
1
|
1320
|
from pylab import *
import scipy.io
from matplotlib.font_manager import FontProperties
from scipy.interpolate import interp1d
from matplotlib import rc
import matplotlib.pyplot as plt
rc('font',**{'family':'serif','serif':['Times New Roman'],'size':7})
mat_contents = scipy.io.loadmat('./cuiwei/cost_std.mat')
t = mat_contents['t'][0]
cost_0 = mat_contents['cost'][0]
cost_1 = mat_contents['cost'][1]
cost_2 = mat_contents['cost'][2]
cost_3 = mat_contents['cost'][3]
cost_4 = mat_contents['cost'][4]
cost_5 = mat_contents['cost'][5]
fig = plt.figure(num=1, dpi=300, facecolor='w', edgecolor='k',figsize=(2.9,2.4))
ax1 = fig.add_subplot(111)
lw=0.3
lw1=0.6
# # ax1.plot(time, f_cost(time),'b-',label='RMS')
ax1.plot(t, cost_0,'-y',linewidth=lw,label=r"$\sigma_{C_D}=0.0$")
ax1.plot(t, cost_1,'--b',linewidth=lw,label=r"$\sigma_{C_D}=0.1$")
ax1.plot(t, cost_2,'-.k',linewidth=lw,label=r"$\sigma_{C_D}=0.2$")
ax1.plot(t, cost_3,':r',linewidth=lw,label=r"$\sigma_{C_D}=0.3$")
ax1.plot(t, cost_4,'-m',linewidth=lw1,label=r"$\sigma_{C_D}=0.4$")
ax1.plot(t, cost_5,'--g',linewidth=lw1,label=r"$\sigma_{C_D}=0.5$")
legend(loc='lower right',frameon=False,shadow=False,handlelength = 4)
xlabel(r'Time (years)');
ylabel(r'Expected relative cost, $C_{M, E}$');
tight_layout()
ylim([0,4])
# plt.show()
plt.savefig("cost_std.pdf")
|
apache-2.0
|
kdebrab/pandas
|
pandas/core/api.py
|
1
|
3090
|
# pylint: disable=W0614,W0401,W0611
# flake8: noqa
import numpy as np
from pandas.core.algorithms import factorize, unique, value_counts
from pandas.core.dtypes.missing import isna, isnull, notna, notnull
from pandas.core.arrays import Categorical
from pandas.core.groupby import Grouper
from pandas.io.formats.format import set_eng_float_format
from pandas.core.index import (Index, CategoricalIndex, Int64Index,
UInt64Index, RangeIndex, Float64Index,
MultiIndex, IntervalIndex,
TimedeltaIndex, DatetimeIndex,
PeriodIndex, NaT)
from pandas.core.indexes.period import Period, period_range, pnow
from pandas.core.indexes.timedeltas import Timedelta, timedelta_range
from pandas.core.indexes.datetimes import Timestamp, date_range, bdate_range
from pandas.core.indexes.interval import Interval, interval_range
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
# TODO: Remove import when statsmodels updates #18264
from pandas.core.reshape.reshape import get_dummies
from pandas.core.indexing import IndexSlice
from pandas.core.tools.numeric import to_numeric
from pandas.tseries.offsets import DateOffset
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.timedeltas import to_timedelta
# see gh-14094.
from pandas.util._depr_module import _DeprecatedModule
_removals = ['day', 'bday', 'businessDay', 'cday', 'customBusinessDay',
'customBusinessMonthEnd', 'customBusinessMonthBegin',
'monthEnd', 'yearEnd', 'yearBegin', 'bmonthEnd', 'bmonthBegin',
'cbmonthEnd', 'cbmonthBegin', 'bquarterEnd', 'quarterEnd',
'byearEnd', 'week']
datetools = _DeprecatedModule(deprmod='pandas.core.datetools',
removals=_removals)
from pandas.core.config import (get_option, set_option, reset_option,
describe_option, option_context, options)
# deprecation, xref #13790
def match(*args, **kwargs):
import warnings
warnings.warn("pd.match() is deprecated and will be removed "
"in a future version",
FutureWarning, stacklevel=2)
from pandas.core.algorithms import match
return match(*args, **kwargs)
def groupby(*args, **kwargs):
import warnings
warnings.warn("pd.groupby() is deprecated and will be removed; "
"Please use the Series.groupby() or "
"DataFrame.groupby() methods",
FutureWarning, stacklevel=2)
return args[0].groupby(*args[1:], **kwargs)
# Deprecation: xref gh-16747
class TimeGrouper(object):
def __new__(cls, *args, **kwargs):
from pandas.core.resample import TimeGrouper
import warnings
warnings.warn("pd.TimeGrouper is deprecated and will be removed; "
"Please use pd.Grouper(freq=...)",
FutureWarning, stacklevel=2)
return TimeGrouper(*args, **kwargs)
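# Minimal replacement sketch for the deprecation above (hypothetical frame):
# import pandas as pd
# idx = pd.date_range('2018-01-01', periods=6, freq='D')
# df = pd.DataFrame({'x': range(6)}, index=idx)
# df.groupby(pd.Grouper(freq='M'))['x'].sum()  # instead of pd.TimeGrouper('M')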
|
bsd-3-clause
|
toastedcornflakes/scikit-learn
|
benchmarks/bench_glmnet.py
|
111
|
3890
|
"""
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of matplotlib.pyplot
import matplotlib.pyplot as plt
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
plt.clf()
xx = range(0, n * step, step)
plt.title('Lasso regression on sample dataset (%d features)' % n_features)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of samples to classify')
plt.ylabel('Time (s)')
plt.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
plt.figure('scikit-learn vs. glmnet benchmark results')
plt.title('Regression in high dimensional spaces (%d samples)' % n_samples)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
s-gv/rnicu-webapp
|
stream-plot/src/galry/visuals/surface_visual.py
|
2
|
2595
|
import numpy as np
from .visual import Visual
from .mesh_visual import MeshVisual
from matplotlib.colors import hsv_to_rgb
def colormap(x):
"""Colorize a 2D grayscale array.
Arguments:
      * x: an NxM array with values in [0,1]
Returns:
* y: an NxMx3 array with a rainbow color palette.
"""
x = np.clip(x, 0., 1.)
# initial and final gradient colors, here rainbow gradient
col0 = np.array([.67, .91, .65]).reshape((1, 1, -1))
col1 = np.array([0., 1., 1.]).reshape((1, 1, -1))
col0 = np.tile(col0, x.shape + (1,))
col1 = np.tile(col1, x.shape + (1,))
x = np.tile(x.reshape(x.shape + (1,)), (1, 1, 3))
return hsv_to_rgb(col0 + (col1 - col0) * x)
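# Illustrative usage (not part of the original module): mapping a small
# grayscale grid to RGB simply adds a trailing colour dimension.
#   >>> rgb = colormap(np.random.rand(8, 8))
#   >>> rgb.shape
#   (8, 8, 3)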
__all__ = ['SurfaceVisual']
class SurfaceVisual(MeshVisual):
def initialize(self, Z, *args, **kwargs):
assert Z.ndim == 2, "Z must have exactly two dimensions"
n, m = Z.shape
# generate grid
x = np.linspace(-1., 1., m)
y = np.linspace(-1., 1., n)
X, Y = np.meshgrid(x, y)
# generate vertices positions
position = np.hstack((X.reshape((-1, 1)), Z.reshape((-1, 1)), Y.reshape((-1, 1)),))
#color
        zmin, zmax = Z.min(), Z.max()
        if zmin != zmax:
            Znormalized = (Z - zmin) / (zmax - zmin)
        else:
            Znormalized = Z
        color = colormap(Znormalized).reshape((-1, 3))
        # one RGBA value per vertex (n * m vertices); using zmin/zmax above
        # avoids clobbering the grid dimensions (n, m) needed below
        color = np.hstack((color, np.ones((n * m, 1))))
# normal
U = np.dstack((X[:,1:] - X[:,:-1],
Y[:,1:] - Y[:,:-1],
Z[:,1:] - Z[:,:-1]))
V = np.dstack((X[1:,:] - X[:-1,:],
Y[1:,:] - Y[:-1,:],
Z[1:,:] - Z[:-1,:]))
U = np.hstack((U, U[:,-1,:].reshape((-1,1,3))))
V = np.vstack((V, V[-1,:,:].reshape((1,-1,3))))
W = np.cross(U, V)
normal0 = W.reshape((-1, 3))
normal = np.zeros_like(normal0)
normal[:,0] = normal0[:,0]
normal[:,1] = normal0[:,2]
normal[:,2] = normal0[:,1]
# tesselation of the grid
index = []
        # row-major layout: vertex (i, j) of the n x m grid is at flat index i*m + j
        for i in range(n-1):
            for j in range(m-1):
                index.extend([i*m+j, (i+1)*m+j, i*m+j+1,
                              (i+1)*m+j, i*m+j+1, (i+1)*m+j+1])
index = np.array(index)
kwargs.update(
position=position,
normal=normal,
color=color,
index=index,
)
super(SurfaceVisual, self).initialize(*args, **kwargs)
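# Illustrative sketch (assumption: the enclosing galry scene creates and
# renders the visual): initialize() only needs a 2D height map, for example
#   Z = np.outer(np.hanning(64), np.hanning(64))
# from which it derives the position, normal, color and index arrays that are
# passed on to MeshVisual.initialize().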
|
agpl-3.0
|
cuemacro/finmarketpy
|
finmarketpy_examples/seasonality_examples.py
|
1
|
7758
|
__author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
Shows how to calculate seasonality
"""
# Loading data
import datetime
import pandas
from chartpy import Chart, Style
from finmarketpy.economics import Seasonality
from findatapy.market import Market, MarketDataGenerator, MarketDataRequest
from chartpy.style import Style
from findatapy.timeseries import Calculations
from findatapy.util.loggermanager import LoggerManager
seasonality = Seasonality()
calc = Calculations()
logger = LoggerManager().getLogger(__name__)
chart = Chart(engine='matplotlib')
market = Market(market_data_generator=MarketDataGenerator())
# choose run_example = 0 for everything
# run_example = 1 - seasonality of gold
# run_example = 2 - seasonality of FX vol
# run_example = 3 - seasonality of gasoline
# run_example = 4 - seasonality in NFP
# run_example = 5 - seasonal adjustment in NFP
run_example = 0
###### Calculate seasonal moves in Gold (using Bloomberg data)
if run_example == 1 or run_example == 0:
md_request = MarketDataRequest(
start_date = "01 Jan 1996", # start date
data_source = 'bloomberg', # use Bloomberg as data source
tickers = ['Gold'],
fields = ['close'], # which fields to download
vendor_tickers = ['XAUUSD Curncy'], # ticker (Bloomberg)
vendor_fields = ['PX_LAST'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
df = market.fetch_market(md_request)
df_ret = calc.calculate_returns(df)
day_of_month_seasonality = seasonality.bus_day_of_month_seasonality(df_ret, partition_by_month = False)
day_of_month_seasonality = calc.convert_month_day_to_date_time(day_of_month_seasonality)
style = Style()
style.date_formatter = '%b'
style.title = 'Gold seasonality'
style.scale_factor = 3
style.file_output = "gold-seasonality.png"
chart.plot(day_of_month_seasonality, style=style)
###### Calculate seasonal moves in FX vol (using Bloomberg data)
if run_example == 2 or run_example == 0:
tickers = ['EURUSDV1M', 'USDJPYV1M', 'GBPUSDV1M', 'AUDUSDV1M']
md_request = MarketDataRequest(
start_date = "01 Jan 1996", # start date
data_source = 'bloomberg', # use Bloomberg as data source
tickers = tickers,
fields = ['close'], # which fields to download
vendor_tickers = [x + ' Curncy' for x in tickers], # ticker (Bloomberg)
vendor_fields = ['PX_LAST'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
df = market.fetch_market(md_request)
df_ret = calc.calculate_returns(df)
day_of_month_seasonality = seasonality.bus_day_of_month_seasonality(df_ret, partition_by_month = False)
day_of_month_seasonality = calc.convert_month_day_to_date_time(day_of_month_seasonality)
style = Style()
style.date_formatter = '%b'
style.title = 'FX vol seasonality'
style.scale_factor = 3
style.file_output = "fx-vol-seasonality.png"
style.source = 'finmarketpy/Bloomberg'
chart.plot(day_of_month_seasonality, style=style)
###### Calculate seasonal moves in Gasoline (using Bloomberg data)
if run_example == 3 or run_example == 0:
md_request = MarketDataRequest(
start_date = "01 Jan 1996", # start date
data_source = 'bloomberg', # use Bloomberg as data source
tickers = ['Gasoline'],
fields = ['close'], # which fields to download
vendor_tickers = ['XB1 Comdty'], # ticker (Bloomberg)
vendor_fields = ['PX_LAST'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
df = market.fetch_market(md_request)
df_ret = calc.calculate_returns(df)
day_of_month_seasonality = seasonality.bus_day_of_month_seasonality(df_ret, partition_by_month = False)
day_of_month_seasonality = calc.convert_month_day_to_date_time(day_of_month_seasonality)
style = Style()
style.date_formatter = '%b'
style.title = 'Gasoline seasonality'
style.scale_factor = 3
style.file_output = "gasoline-seasonality.png"
chart.plot(day_of_month_seasonality, style=style)
###### Calculate seasonal moves in US non-farm payrolls (using Bloomberg data)
if run_example == 4 or run_example == 0:
# get the NFP NSA from ALFRED/FRED
md_request = MarketDataRequest(
start_date="01 Jun 2000", # start date (download data over past decade)
data_source='alfred', # use ALFRED/FRED as data source
tickers=['US NFP'], # ticker
fields=['actual-release'], # which fields to download
vendor_tickers=['PAYNSA'], # ticker (FRED) PAYEMS (NSA)
vendor_fields=['actual-release']) # which FRED fields to download
df = market.fetch_market(md_request)
df_ret = calc.calculate_returns(df)
month_seasonality = seasonality.monthly_seasonality_from_prices(df)
style = Style()
style.date_formatter = '%b'
style.title = 'NFP seasonality'
style.scale_factor = 3
style.file_output = "nfp-seasonality.png"
chart.plot(month_seasonality, style=style)
###### Apply seasonal adjustment to NFP data and compare the seasonal adjustment by finmarketpy with that of BLS
if run_example == 5 or run_example == 0:
# get the NFP NSA from ALFRED/FRED
md_request = MarketDataRequest(
start_date="01 Jun 1980", # start date (download data over past decade)
data_source='alfred', # use ALFRED/FRED as data source
tickers=['US NFP (NSA)', 'US NFP (SA)'], # ticker
fields=['actual-release'], # which fields to download
vendor_tickers=['PAYNSA', 'PAYEMS'], # ticker (FRED) PAYEMS (SA) PAYNSA (NSA)
vendor_fields=['actual-release']) # which FRED fields to download
df = market.fetch_market(md_request)
# Calculate changes in NFP
df = df - df.shift(1)
df_seasonal_adjusted = seasonality.adjust_rolling_seasonality(pandas.DataFrame(df['US NFP (NSA).actual-release']),
window=12*20, likely_period=12)
df_seasonal_adjusted.columns = [x + ' SA finmarketpy' for x in df_seasonal_adjusted.columns]
# Compare not seasonally adjusted vs seasonally adjusted
df = df.join(df_seasonal_adjusted)
df = df[df.index > '01 Jan 2000']
style = Style()
style.title = 'NFP (seasonally adjusted)'
style.scale_factor = 3
style.file_output = "nfp-seasonally-adjusted.png"
chart.plot(df, style=style)
|
apache-2.0
|
Carralex/landlab
|
landlab/components/overland_flow/examples/deAlmeida_DEM_driver.py
|
5
|
4529
|
#! /usr/env/python
""" deAlmeida_SquareBasin.py
This is an example driver that utilizes the
OverlandFlow class from generate_overland_flow_deAlmeida.py.
The driver reads in a square watershed DEM that was run to steady state using
a simple stream power model.
It then routes a storm across the square watershed. Storm parameters taken from
Hawk and Eagleson (1992) Poisson parameters for the Denver, CO station.
After the storm, additional time is needed to drain the water from the system.
At the end of the storm, total water depth mass is calculated and compared
against the predicted water mass under steady state conditions. The hydrograph
is plotted and percent error is output.
Written by Jordan M. Adams, April 2016.
"""
from __future__ import print_function
from landlab.components.overland_flow import OverlandFlow
from landlab.io import read_esri_ascii
from matplotlib import pyplot as plt
import os
import time
import numpy as np
# This provides us with an initial time. At the end, it gives us total
# model run time in seconds.
start_time = time.time()
## This is a steady-state landscape generated by simple stream power
## This is a 200 x 200 grid with an outlet at center of the bottom edge.
dem_name = 'Square_TestBasin.asc'
## Now we can create and initialize a raster model grid by reading a DEM
## First, this looks for the DEM in the overland_flow folder in Landlab
DATA_FILE = os.path.join(os.path.dirname(__file__), dem_name)
## Now the ASCII is read, assuming that it is standard ESRI format.
(rmg, z) = read_esri_ascii(DATA_FILE)
## Start time 1 second
elapsed_time = 1.0
## Model Run Time in seconds
model_run_time = 216000.0
## Lists for saving data
discharge_at_outlet = []
hydrograph_time_sec = []
hydrograph_time_hrs = []
## Setting initial fields...
rmg['node']['topographic__elevation'] = z
rmg['link']['surface_water__discharge'] = np.zeros(rmg.number_of_links)
rmg['node']['surface_water__depth'] = np.zeros(rmg.number_of_nodes)
## and fixed link boundary conditions...
rmg.set_fixed_link_boundaries_at_grid_edges(True, True, True, True,
fixed_link_value_of='surface_water__discharge')
## Setting the outlet node to OPEN_BOUNDARY
rmg.status_at_node[100] = 1
## Initialize the OverlandFlow() class.
of = OverlandFlow(rmg, use_fixed_links = True, steep_slopes=True)
## Record the start time so we know how long it runs.
start_time = time.time()
## Link to sample at the outlet
link_to_sample = 299
## Storm duration in seconds
storm_duration = 7200.0
## Running the overland flow component.
while elapsed_time < model_run_time:
## The storm starts when the model starts. While the elapsed time is less
## than the storm duration, we add water to the system as rainfall.
if elapsed_time < storm_duration:
of.rainfall_intensity = 4.07222 * (10 ** -7) # Rainfall intensity (m/s)
    ## Once the elapsed time exceeds the storm duration, rainfall ceases.
else:
of.rainfall_intensity = 0.0
## Generating overland flow based on the deAlmeida solution.
of.overland_flow()
## Append time and discharge to their lists to save data and for plotting.
hydrograph_time_sec.append(elapsed_time)
hydrograph_time_hrs.append(round(elapsed_time/3600., 2))
discharge_at_outlet.append(of.q[link_to_sample])
## Add the time step, repeat until elapsed time >= model_run_time
print(elapsed_time)
elapsed_time += of.dt
plt.figure(1)
plt.imshow(z.reshape(rmg.shape), origin='lower', cmap='pink')
plt.tick_params(axis='both', labelbottom='off', labelleft='off')
cb = plt.colorbar()
cb.set_label('Elevation (m)', rotation=270, labelpad=15)
plt.figure(2)
plt.plot(hydrograph_time_hrs, (np.abs(discharge_at_outlet)*rmg.dx), 'b-')
plt.xlabel('Time (hrs)')
plt.ylabel('Discharge (cms)')
plt.title('Hydrograph')
# integrate discharge over time to get the total outflow volume
calc_water_mass = round(np.abs(np.trapz(
    np.abs(discharge_at_outlet) * rmg.dx, x=hydrograph_time_sec)), 2)
theoretical_water_mass = round(((rmg.number_of_core_nodes * rmg.cellarea) *
(4.07222 * (10 ** -7)) * storm_duration), 2)
percent_error = round(((np.abs(calc_water_mass) - theoretical_water_mass) /
theoretical_water_mass * 100), 2)
print('\n', 'Total calculated water mass: ', calc_water_mass)
print('\n', 'Theoretical water mass (Q = P * A): ', theoretical_water_mass)
print('\n', 'Percent Error: ', percent_error, ' %')
endtime = time.time()
print('\n', 'Total run time: ', round(endtime - start_time, 2), ' seconds')
|
mit
|
ZenDevelopmentSystems/scikit-learn
|
sklearn/cluster/setup.py
|
263
|
1449
|
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
jbloom/mapmuts
|
scripts/mapmuts_inferenrichment.py
|
1
|
14918
|
#!python
"""Infers enrichment ratios.
Written by Jesse Bloom, 2013.
"""
import re
import sys
import os
import time
import warnings
import mapmuts
import mapmuts.io
import mapmuts.sequtils
import mapmuts.bayesian
import mapmuts.plot
def main():
"""Main body of script."""
# check on module availability
if not mapmuts.bayesian.PymcAvailable():
raise ImportError("Cannot run this script as pymc or numpy are not available.")
if not mapmuts.bayesian.ScipyAvailable():
warnings.warn("Cannot import scipy. The MCMC in this script will be less efficient. Installation of scipy is strongly recommended to improve performance.\n")
# read input variables
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument"\
+ ' specifying the name of the input file.')
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile of %s" % infilename)
d = mapmuts.io.ParseInfile(open(infilename))
outfileprefix = mapmuts.io.ParseStringValue(d, 'outfileprefix')
logfile = "%s_inferenrichment_log.txt" % outfileprefix
log = open(logfile, 'w')
equilibriumfreqsfile = open('%s_equilibriumfreqs.txt' % outfileprefix, 'w')
equilibriumfreqsfile.write('#SITE\tWT_AA\tSITE_ENTROPY')
for aa in mapmuts.sequtils.AminoAcids():
equilibriumfreqsfile.write('\tPI_%s' % aa)
equilibriumfreqsfile.write('\n')
enrichmentratiosfile = open('%s_enrichmentratios.txt' % outfileprefix, 'w')
enrichmentratiosfile.write('#MUTATION\tPHI\tPHI_HPD95_LOW\tPHI_HPD95_HIGH\tDIRECT_RATIO\tMUTDNA_COUNTS\n')
try:
log.write("Beginning execution of mapmuts_inferenrichment.py"\
" in directory %s" % (os.getcwd()))
mapmuts.io.PrintVersions(log)
log.write("Input data being read from infile %s\n\n" % infilename)
log.write("Progress being logged to this file, %s\n\n" % logfile)
log.write("Read the following key/value pairs from infile %s:"\
% (infilename))
for (key, value) in d.iteritems():
log.write("\n%s %s" % (key, value))
dnafiles = mapmuts.io.ParseFileList(d, 'DNA_files')
rnafiles = mapmuts.io.ParseFileList(d, 'RNA_files')
mutdnafiles = mapmuts.io.ParseFileList(d, 'mutDNA_files')
mutvirusfiles = mapmuts.io.ParseFileList(d, 'mutvirus_files')
if not (len(dnafiles) == len(rnafiles) == len(mutdnafiles) == len(mutvirusfiles) >= 1):
raise IOError("Failed to find four file lists (DNA, RNA, mutDNA, mutvirus) all of the same length with at least one file each.")
# parse any excludesite_NNN entries
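        # Illustrative input format (inferred from the parsing below, not from
        # the mapmuts documentation): a line such as
        #   excludesite_53 use exclude use
        # keeps libraries 0 and 2 but excludes library 1 at residue 53; the
        # number of entries must match the number of DNA files.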
excludekeymatch = re.compile('excludesite\_(?P<site>\d+)')
excludekeys = [key for key in d.iterkeys() if excludekeymatch.search(key)]
exclude_libs = {} # keyed by site, values dictionary keyed by excluded num (0, 1, 2, ..)
for key in excludekeys:
site = int(excludekeymatch.search(key).group('site'))
exclude_libs[site] = {}
entries = d[key].split()
if len(entries) != len(dnafiles):
raise ValueError("Invalid number of entries for %s" % key)
for i in range(len(entries)):
if entries[i].strip() == 'use':
continue
elif entries[i].strip() == 'exclude':
exclude_libs[site][i] = True
else:
raise ValueError("Entries for %s must be 'use' or 'exclude', not %s" % (key, entries[i]))
#
alpha = mapmuts.io.ParseFloatValue(d, 'alpha')
phi_prior = mapmuts.io.ParseFloatValue(d, 'phi_prior')
if not phi_prior > 0:
raise ValueError("phi_prior must be greater than zero")
log.write("\nUsing a prior for phi of phi_prior = %.4f\n" % phi_prior)
if not (alpha > 0):
raise ValueError("alpha must be greater than zero")
minbeta = mapmuts.io.ParseFloatValue(d, 'minbeta')
if not (minbeta > 0):
raise ValueError("minbeta must be greater than zero")
seed = mapmuts.io.ParseIntValue(d, 'seed')
mapmuts.bayesian.Seed(seed)
nruns = mapmuts.io.ParseIntValue(d, 'nruns')
if nruns < 2:
warnings.warn('Will not be able to check for convergence since nruns < 2')
nsteps = mapmuts.io.ParseIntValue(d, 'nsteps')
burn = mapmuts.io.ParseIntValue(d, 'burn')
thin = mapmuts.io.ParseIntValue(d, 'thin')
convergence = mapmuts.io.ParseFloatValue(d, 'convergence')
if not (convergence > 1):
raise ValueError("convergence must be greater than one")
stepincrease = mapmuts.io.ParseIntValue(d, 'stepincrease')
convergencewarning = mapmuts.io.ParseBoolValue(d, 'convergencewarning')
MCMC_traces = mapmuts.io.ParseStringValue(d, 'MCMC_traces')
if MCMC_traces in ['None', 'False']:
MCMC_traces = None
elif not mapmuts.plot.PylabAvailable():
log.write("\nWARNING: cannot create posterior plots as pylab / matplotlib are not available.\n")
MCMC_traces = None
elif not os.path.isdir(MCMC_traces):
raise IOError("MCMC_traces directory of %s does not already exist. You must create it before running this script." % MCMC_traces)
enrichmentratio_plots = mapmuts.io.ParseStringValue(d, 'enrichmentratio_plots')
if enrichmentratio_plots in ['None', 'False']:
enrichmentratio_plots = None
elif not mapmuts.plot.PylabAvailable():
log.write("\nWARNING: cannot create enrichment ratio plots as pylab / matplotlib are not available.\n")
enrichmentratio_plots = None
elif not os.path.isdir(enrichmentratio_plots):
raise IOError("enrichmentratio_plots directory of %s does not already exist. You must create it before running this script." % enrichmentratio_plots)
equilibriumfreqs_plots = mapmuts.io.ParseStringValue(d, 'equilibriumfreqs_plots')
if equilibriumfreqs_plots in ['None', 'False']:
equilibriumfreqs_plots = None
elif not mapmuts.plot.PylabAvailable():
log.write("\nWARNING: cannot create equilibrium frequency plots as pylab / matplotlib are not available.\n")
equilibriumfreqs_plots = None
elif not os.path.isdir(equilibriumfreqs_plots):
raise IOError("equilibriumfreqs_plots directory of %s does not already exist. You must create it before running this script." % equilibriumfreqs_plots)
log.write('\n\n')
# read codon counts and set up priors
log.write("Reading in the codon counts...")
log.flush()
nlibs = len(dnafiles)
dna_libs = [mapmuts.io.ReadCodonCounts(open(f)) for f in dnafiles]
rna_libs = [mapmuts.io.ReadCodonCounts(open(f)) for f in rnafiles]
mutdna_libs = [mapmuts.io.ReadCodonCounts(open(f)) for f in mutdnafiles]
mutvirus_libs = [mapmuts.io.ReadCodonCounts(open(f)) for f in mutvirusfiles]
for x in dna_libs + rna_libs + mutdna_libs + mutvirus_libs:
mapmuts.sequtils.ClassifyCodonCounts(x)
log.write(" completed reading the codon counts.\n")
assert nlibs == len(dna_libs) == len(rna_libs) == len(mutdna_libs) == len(mutvirus_libs)
mu_priors = []
rho_priors = []
epsilon_priors = []
for ilib in range(nlibs):
mu_priors.append(max(minbeta, (mutdna_libs[ilib]['TOTAL_MUT'] / float(mutdna_libs[ilib]['TOTAL_COUNTS']) - dna_libs[ilib]['TOTAL_MUT'] / float(dna_libs[ilib]['TOTAL_COUNTS'])) / 63))
rho = {}
epsilon = {}
for (ndiffs, denom) in [(1, 9.0), (2, 27.0), (3, 27.0)]:
epsilon[ndiffs] = max(minbeta, dna_libs[ilib]['TOTAL_N_%dMUT' % ndiffs] / float(dna_libs[ilib]['TOTAL_COUNTS']) / denom)
rho[ndiffs] = max(minbeta, rna_libs[ilib]['TOTAL_N_%dMUT' % ndiffs] / float(rna_libs[ilib]['TOTAL_COUNTS']) / denom - epsilon[ndiffs])
epsilon_priors.append(epsilon)
rho_priors.append(rho)
# get the protein length
integer_keys = [key for key in dna_libs[0].keys() if isinstance(key, int)]
protlength = max(integer_keys)
assert protlength == len(integer_keys), "Problem with residue numbering?"
# loop over all residues
for ires in range(1, protlength + 1):
phis = {}
wtcodon = dna_libs[0][ires]['WT']
wtaa = mapmuts.sequtils.Translate([('wt', wtcodon)])[0][1]
if not wtaa:
wtaa = '*'
decorated_list = []
for (mutaa, mutcodons) in mapmuts.sequtils.MutAAsCodons(wtcodon):
if MCMC_traces:
plot_phi_traces = "%s/%s_%s%d%s.pdf" % (MCMC_traces, outfileprefix, wtaa, ires, mutaa)
else:
plot_phi_traces = False
start_t = time.clock()
log.write('\nPerforming inference for %s%d%s...' % (wtaa, ires, mutaa))
log.flush()
library_stats = []
for ilib in range(nlibs):
                    if (ires in exclude_libs) and (ilib in exclude_libs[ires]):
continue
assert dna_libs[ilib][ires]['WT'] == rna_libs[ilib][ires]['WT'] == mutdna_libs[ilib][ires]['WT'] == mutvirus_libs[ilib][ires]['WT'] == wtcodon
libstats = {'mu_prior':mu_priors[ilib],
'epsilon_prior':epsilon_priors[ilib],
'rho_prior':rho_priors[ilib],
'Nrdna':dna_libs[ilib][ires]['PAIRED_COUNTS'],
'Nrrna':rna_libs[ilib][ires]['PAIRED_COUNTS'],
'Nrmutdna':mutdna_libs[ilib][ires]['PAIRED_COUNTS'],
'Nrmutvirus':mutvirus_libs[ilib][ires]['PAIRED_COUNTS'],
'nrdna_list':[dna_libs[ilib][ires][codon] for codon in mutcodons],
'nrrna_list':[rna_libs[ilib][ires][codon] for codon in mutcodons],
'nrmutdna_list':[mutdna_libs[ilib][ires][codon] for codon in mutcodons],
'nrmutvirus_list':[mutvirus_libs[ilib][ires][codon] for codon in mutcodons],
}
library_stats.append(libstats)
if not library_stats:
raise ValueError("No included libraries for %s%d" % (mutaa, ires))
(phi_mean, phi_hpd95, phi_samples, converged) = mapmuts.bayesian.InferEnrichmentMCMC_2(alpha, phi_prior, wtcodon, mutcodons, library_stats, nruns, nsteps, burn, thin, convergence=convergence, plot_phi_traces=plot_phi_traces)
t = time.clock() - start_t
log.write(" completed MCMC of %d steps in %.1f seconds; inferred phi of %.4f (%.4f to %.4f)" % (nsteps, t, phi_mean, phi_hpd95[0], phi_hpd95[1]))
if nruns > 1 and not converged:
log.write('; inference FAILED to converge.\n')
log.flush()
if stepincrease > 1:
start_t = time.clock()
log.write('Trying again with %d-fold more steps...' % stepincrease)
log.flush()
(phi_mean, phi_hpd95, phi_samples, converged) = mapmuts.bayesian.InferEnrichmentMCMC_2(alpha, phi_prior, wtcodon, mutcodons, library_stats, nruns, stepincrease * nsteps, burn * stepincrease, thin, convergence=convergence, plot_phi_traces=plot_phi_traces)
t = time.clock() - start_t
if converged:
log.write(' this time MCMC converged in %.1f seconds; inferred phi of %.4f (%.4f to %.4f).\n' % (t, phi_mean, phi_hpd95[0], phi_hpd95[1]))
else:
log.write(' MCMC still FAILED to converge in %.1f seconds; inferred phi of %.4f (%.4f to %.4f).\n' % (t, phi_mean, phi_hpd95[0], phi_hpd95[1]))
if not converged:
warnings.warn('Inference failed to converge for %s%d%s. Using non-converged estimate.' % (wtaa, ires, mutaa), RuntimeWarning)
elif converged:
log.write("; inference converged.\n")
else:
log.write('.\n')
(direct_ratio, mutdnacounts) = mapmuts.bayesian.DirectEnrichmentRatio(library_stats)
log.write("For comparison, the direct ratio is %.4f with %d mutDNA counts.\n" % (direct_ratio, mutdnacounts))
log.flush()
enrichmentratiosfile.write("%s%d%s\t%f\t%f\t%f\t%f\t%d\n" % (wtaa, ires, mutaa, phi_mean, phi_hpd95[0], phi_hpd95[1], direct_ratio, mutdnacounts))
enrichmentratiosfile.flush()
decorated_list.append(("%s%d%s" % (wtaa, ires, mutaa), phi_mean, phi_hpd95[0], phi_hpd95[1]))
if mutaa != '*':
phis[mutaa] = phi_mean
decorated_list.sort()
if enrichmentratio_plots:
mutations = [x[0] for x in decorated_list]
ratios = [x[1] for x in decorated_list]
ratio_low = [x[2] for x in decorated_list]
ratio_high = [x[3] for x in decorated_list]
plotfile = "%s/%s_%s%d.pdf" % (enrichmentratio_plots, outfileprefix, wtaa, ires)
mapmuts.plot.PlotEnrichmentRatios(mutations, ratios, [ratio_low, ratio_high], plotfile)
pis = mapmuts.bayesian.EquilibriumFracs(wtaa, phis) # equilibrium fracs
assert len(pis) == 20 and abs(sum(pis.values()) - 1.0) < 1e-6
h = mapmuts.bayesian.SiteEntropy(pis) # site entropy
equilibriumfreqsfile.write('%d\t%s\t%f' % (ires, wtaa, h))
for aa in mapmuts.sequtils.AminoAcids():
equilibriumfreqsfile.write('\t%f' % pis[aa])
equilibriumfreqsfile.write('\n')
equilibriumfreqsfile.flush()
if equilibriumfreqs_plots:
plotfile = '%s/%s_%s%d.pdf' % (equilibriumfreqs_plots, outfileprefix, wtaa, ires)
title = 'residue %s%d, site entropy of %.2f bits' % (wtaa, ires, h)
mapmuts.plot.PlotEquilibriumFreqs(pis, plotfile, title)
except:
for x in sys.exc_info():
log.write("\n\n%s" % str(x))
log.write("\n\nPrematurely closing log due to execution error.")
raise
finally:
log.write("\n\nExecution completed at %s." % time.ctime())
log.close()
enrichmentratiosfile.close()
equilibriumfreqsfile.close()
if __name__ == '__main__':
main() # run the script
|
gpl-3.0
|
edhuckle/statsmodels
|
statsmodels/base/data.py
|
9
|
22573
|
"""
Base tools for handling various kinds of data structures, attaching metadata to
results, and doing data cleaning
"""
from statsmodels.compat.python import reduce, iteritems, lmap, zip, range
from statsmodels.compat.numpy import np_matrix_rank
import numpy as np
from pandas import DataFrame, Series, TimeSeries, isnull
from statsmodels.tools.decorators import (resettable_cache, cache_readonly,
cache_writable)
import statsmodels.tools.data as data_util
from statsmodels.tools.sm_exceptions import MissingDataError
def _asarray_2dcolumns(x):
if np.asarray(x).ndim > 1 and np.asarray(x).squeeze().ndim == 1:
return
def _asarray_2d_null_rows(x):
"""
Makes sure input is an array and is 2d. Makes sure output is 2d. True
indicates a null in the rows of 2d x.
"""
#Have to have the asarrays because isnull doesn't account for array-like
#input
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
return np.any(isnull(x), axis=1)[:, None]
def _nan_rows(*arrs):
"""
Returns a boolean array which is True where any of the rows in any
of the _2d_ arrays in arrs are NaNs. Inputs can be any mixture of Series,
DataFrames or array-like.
"""
if len(arrs) == 1:
arrs += ([[False]],)
def _nan_row_maybe_two_inputs(x, y):
# check for dtype bc dataframe has dtypes
x_is_boolean_array = hasattr(x, 'dtype') and x.dtype == bool and x
return np.logical_or(_asarray_2d_null_rows(x),
(x_is_boolean_array | _asarray_2d_null_rows(y)))
return reduce(_nan_row_maybe_two_inputs, arrs).squeeze()
class ModelData(object):
"""
Class responsible for handling input data and extracting metadata into the
appropriate form
"""
_param_names = None
def __init__(self, endog, exog=None, missing='none', hasconst=None,
**kwargs):
if 'design_info' in kwargs:
self.design_info = kwargs.pop('design_info')
if 'formula' in kwargs:
self.formula = kwargs.pop('formula')
if missing != 'none':
arrays, nan_idx = self.handle_missing(endog, exog, missing,
**kwargs)
self.missing_row_idx = nan_idx
self.__dict__.update(arrays) # attach all the data arrays
self.orig_endog = self.endog
self.orig_exog = self.exog
self.endog, self.exog = self._convert_endog_exog(self.endog,
self.exog)
else:
self.__dict__.update(kwargs) # attach the extra arrays anyway
self.orig_endog = endog
self.orig_exog = exog
self.endog, self.exog = self._convert_endog_exog(endog, exog)
# this has side-effects, attaches k_constant and const_idx
self._handle_constant(hasconst)
self._check_integrity()
self._cache = resettable_cache()
def __getstate__(self):
from copy import copy
d = copy(self.__dict__)
if "design_info" in d:
del d["design_info"]
d["restore_design_info"] = True
return d
def __setstate__(self, d):
if "restore_design_info" in d:
# NOTE: there may be a more performant way to do this
from patsy import dmatrices, PatsyError
exc = []
try:
data = d['frame']
except KeyError:
data = d['orig_endog'].join(d['orig_exog'])
for depth in [2, 3, 1, 0, 4]: # sequence is a guess where to likely find it
try:
_, design = dmatrices(d['formula'], data, eval_env=depth,
return_type='dataframe')
break
except (NameError, PatsyError) as e:
print('not in depth %d' % depth)
exc.append(e) # why do I need a reference from outside except block
pass
else:
raise exc[-1]
self.design_info = design.design_info
del d["restore_design_info"]
self.__dict__.update(d)
def _handle_constant(self, hasconst):
if hasconst is not None:
if hasconst:
self.k_constant = 1
self.const_idx = None
else:
self.k_constant = 0
self.const_idx = None
elif self.exog is None:
self.const_idx = None
self.k_constant = 0
else:
# detect where the constant is
check_implicit = False
const_idx = np.where(self.exog.ptp(axis=0) == 0)[0].squeeze()
self.k_constant = const_idx.size
if self.k_constant == 1:
if self.exog[:, const_idx].mean() != 0:
self.const_idx = const_idx
else:
# we only have a zero column and no other constant
check_implicit = True
elif self.k_constant > 1:
# we have more than one constant column
# look for ones
values = [] # keep values if we need != 0
for idx in const_idx:
value = self.exog[:, idx].mean()
if value == 1:
self.k_constant = 1
self.const_idx = idx
break
values.append(value)
else:
# we didn't break, no column of ones
pos = (np.array(values) != 0)
if pos.any():
# take the first nonzero column
self.k_constant = 1
self.const_idx = const_idx[pos.argmax()]
else:
# only zero columns
check_implicit = True
elif self.k_constant == 0:
check_implicit = True
else:
# shouldn't be here
pass
if check_implicit:
# look for implicit constant
# Compute rank of augmented matrix
augmented_exog = np.column_stack(
(np.ones(self.exog.shape[0]), self.exog))
rank_augm = np_matrix_rank(augmented_exog)
rank_orig = np_matrix_rank(self.exog)
self.k_constant = int(rank_orig == rank_augm)
self.const_idx = None
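        # Summary of the outcomes above (descriptive only): an explicit column
        # of ones gives k_constant == 1 with const_idx pointing at it, while an
        # implicit constant (exog spans a constant only through a linear
        # combination of columns) gives k_constant == 1 with const_idx None.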
@classmethod
def _drop_nans(cls, x, nan_mask):
return x[nan_mask]
@classmethod
def _drop_nans_2d(cls, x, nan_mask):
return x[nan_mask][:, nan_mask]
@classmethod
def handle_missing(cls, endog, exog, missing, **kwargs):
"""
This returns a dictionary with keys endog, exog and the keys of
kwargs. It preserves Nones.
"""
none_array_names = []
# patsy's already dropped NaNs in y/X
missing_idx = kwargs.pop('missing_idx', None)
if missing_idx is not None:
# y, X already handled by patsy. add back in later.
combined = ()
combined_names = []
if exog is None:
none_array_names += ['exog']
elif exog is not None:
combined = (endog, exog)
combined_names = ['endog', 'exog']
else:
combined = (endog,)
combined_names = ['endog']
none_array_names += ['exog']
# deal with other arrays
combined_2d = ()
combined_2d_names = []
if len(kwargs):
for key, value_array in iteritems(kwargs):
if value_array is None or value_array.ndim == 0:
none_array_names += [key]
continue
# grab 1d arrays
if value_array.ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
elif value_array.squeeze().ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
# grab 2d arrays that are _assumed_ to be symmetric
elif value_array.ndim == 2:
combined_2d += (np.asarray(value_array),)
combined_2d_names += [key]
else:
raise ValueError("Arrays with more than 2 dimensions "
"aren't yet handled")
if missing_idx is not None:
nan_mask = missing_idx
updated_row_mask = None
if combined: # there were extra arrays not handled by patsy
combined_nans = _nan_rows(*combined)
if combined_nans.shape[0] != nan_mask.shape[0]:
raise ValueError("Shape mismatch between endog/exog "
"and extra arrays given to model.")
# for going back and updated endog/exog
updated_row_mask = combined_nans[~nan_mask]
nan_mask |= combined_nans # for updating extra arrays only
if combined_2d:
                # unpack: _nan_rows expects the arrays as separate arguments
                combined_2d_nans = _nan_rows(*combined_2d)
if combined_2d_nans.shape[0] != nan_mask.shape[0]:
raise ValueError("Shape mismatch between endog/exog "
"and extra 2d arrays given to model.")
if updated_row_mask is not None:
updated_row_mask |= combined_2d_nans[~nan_mask]
else:
updated_row_mask = combined_2d_nans[~nan_mask]
nan_mask |= combined_2d_nans
else:
nan_mask = _nan_rows(*combined)
if combined_2d:
nan_mask = _nan_rows(*(nan_mask[:, None],) + combined_2d)
if not np.any(nan_mask): # no missing don't do anything
combined = dict(zip(combined_names, combined))
if combined_2d:
combined.update(dict(zip(combined_2d_names, combined_2d)))
if none_array_names:
combined.update(dict(zip(none_array_names,
[None] * len(none_array_names))))
if missing_idx is not None:
combined.update({'endog': endog})
if exog is not None:
combined.update({'exog': exog})
return combined, []
elif missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
elif missing == 'drop':
nan_mask = ~nan_mask
drop_nans = lambda x: cls._drop_nans(x, nan_mask)
drop_nans_2d = lambda x: cls._drop_nans_2d(x, nan_mask)
combined = dict(zip(combined_names, lmap(drop_nans, combined)))
if missing_idx is not None:
if updated_row_mask is not None:
updated_row_mask = ~updated_row_mask
# update endog/exog with this new information
endog = cls._drop_nans(endog, updated_row_mask)
if exog is not None:
exog = cls._drop_nans(exog, updated_row_mask)
combined.update({'endog': endog})
if exog is not None:
combined.update({'exog': exog})
if combined_2d:
combined.update(dict(zip(combined_2d_names,
lmap(drop_nans_2d, combined_2d))))
if none_array_names:
combined.update(dict(zip(none_array_names,
[None] * len(none_array_names))))
return combined, np.where(~nan_mask)[0].tolist()
else:
raise ValueError("missing option %s not understood" % missing)
def _convert_endog_exog(self, endog, exog):
# for consistent outputs if endog is (n,1)
yarr = self._get_yarr(endog)
xarr = None
if exog is not None:
xarr = self._get_xarr(exog)
if xarr.ndim == 1:
xarr = xarr[:, None]
if xarr.ndim != 2:
raise ValueError("exog is not 1d or 2d")
return yarr, xarr
@cache_writable()
def ynames(self):
endog = self.orig_endog
ynames = self._get_names(endog)
if not ynames:
ynames = _make_endog_names(self.endog)
if len(ynames) == 1:
return ynames[0]
else:
return list(ynames)
@cache_writable()
def xnames(self):
exog = self.orig_exog
if exog is not None:
xnames = self._get_names(exog)
if not xnames:
xnames = _make_exog_names(self.exog)
return list(xnames)
return None
@property
def param_names(self):
# for handling names of 'extra' parameters in summary, etc.
return self._param_names or self.xnames
@param_names.setter
def param_names(self, values):
self._param_names = values
@cache_readonly
def row_labels(self):
exog = self.orig_exog
if exog is not None:
row_labels = self._get_row_labels(exog)
else:
endog = self.orig_endog
row_labels = self._get_row_labels(endog)
return row_labels
def _get_row_labels(self, arr):
return None
def _get_names(self, arr):
if isinstance(arr, DataFrame):
return list(arr.columns)
elif isinstance(arr, Series):
if arr.name:
return [arr.name]
else:
return
else:
try:
return arr.dtype.names
except AttributeError:
pass
return None
def _get_yarr(self, endog):
if data_util._is_structured_ndarray(endog):
endog = data_util.struct_to_ndarray(endog)
endog = np.asarray(endog)
if len(endog) == 1: # never squeeze to a scalar
if endog.ndim == 1:
return endog
elif endog.ndim > 1:
return np.asarray([endog.squeeze()])
return endog.squeeze()
def _get_xarr(self, exog):
if data_util._is_structured_ndarray(exog):
exog = data_util.struct_to_ndarray(exog)
return np.asarray(exog)
def _check_integrity(self):
if self.exog is not None:
if len(self.exog) != len(self.endog):
raise ValueError("endog and exog matrices are different sizes")
def wrap_output(self, obj, how='columns', names=None):
if how == 'columns':
return self.attach_columns(obj)
elif how == 'rows':
return self.attach_rows(obj)
elif how == 'cov':
return self.attach_cov(obj)
elif how == 'dates':
return self.attach_dates(obj)
elif how == 'columns_eq':
return self.attach_columns_eq(obj)
elif how == 'cov_eq':
return self.attach_cov_eq(obj)
elif how == 'generic_columns':
return self.attach_generic_columns(obj, names)
elif how == 'generic_columns_2d':
return self.attach_generic_columns_2d(obj, names)
elif how == 'ynames':
return self.attach_ynames(obj)
else:
return obj
def attach_columns(self, result):
return result
def attach_columns_eq(self, result):
return result
def attach_cov(self, result):
return result
def attach_cov_eq(self, result):
return result
def attach_rows(self, result):
return result
def attach_dates(self, result):
return result
def attach_generic_columns(self, result, *args, **kwargs):
return result
def attach_generic_columns_2d(self, result, *args, **kwargs):
return result
def attach_ynames(self, result):
return result
class PatsyData(ModelData):
def _get_names(self, arr):
return arr.design_info.column_names
class PandasData(ModelData):
"""
Data handling class which knows how to reattach pandas metadata to model
results
"""
def _convert_endog_exog(self, endog, exog=None):
#TODO: remove this when we handle dtype systematically
endog = np.asarray(endog)
exog = exog if exog is None else np.asarray(exog)
if endog.dtype == object or exog is not None and exog.dtype == object:
raise ValueError("Pandas data cast to numpy dtype of object. "
"Check input data with np.asarray(data).")
return super(PandasData, self)._convert_endog_exog(endog, exog)
@classmethod
def _drop_nans(cls, x, nan_mask):
if hasattr(x, 'ix'):
return x.ix[nan_mask]
else: # extra arguments could be plain ndarrays
return super(PandasData, cls)._drop_nans(x, nan_mask)
@classmethod
def _drop_nans_2d(cls, x, nan_mask):
if hasattr(x, 'ix'):
return x.ix[nan_mask].ix[:, nan_mask]
else: # extra arguments could be plain ndarrays
return super(PandasData, cls)._drop_nans_2d(x, nan_mask)
def _check_integrity(self):
endog, exog = self.orig_endog, self.orig_exog
# exog can be None and we could be upcasting one or the other
if (exog is not None and
(hasattr(endog, 'index') and hasattr(exog, 'index')) and
not self.orig_endog.index.equals(self.orig_exog.index)):
raise ValueError("The indices for endog and exog are not aligned")
super(PandasData, self)._check_integrity()
def _get_row_labels(self, arr):
try:
return arr.index
except AttributeError:
# if we've gotten here it's because endog is pandas and
# exog is not, so just return the row labels from endog
return self.orig_endog.index
def attach_generic_columns(self, result, names):
# get the attribute to use
column_names = getattr(self, names, None)
return Series(result, index=column_names)
def attach_generic_columns_2d(self, result, rownames, colnames=None):
colnames = colnames or rownames
rownames = getattr(self, rownames, None)
colnames = getattr(self, colnames, None)
return DataFrame(result, index=rownames, columns=colnames)
def attach_columns(self, result):
# this can either be a 1d array or a scalar
# don't squeeze because it might be a 2d row array
# if it needs a squeeze, the bug is elsewhere
if result.ndim <= 1:
return Series(result, index=self.param_names)
else: # for e.g., confidence intervals
return DataFrame(result, index=self.param_names)
def attach_columns_eq(self, result):
return DataFrame(result, index=self.xnames, columns=self.ynames)
def attach_cov(self, result):
return DataFrame(result, index=self.param_names,
columns=self.param_names)
def attach_cov_eq(self, result):
return DataFrame(result, index=self.ynames, columns=self.ynames)
def attach_rows(self, result):
# assumes if len(row_labels) > len(result) it's bc it was truncated
# at the front, for AR lags, for example
squeezed = result.squeeze()
# May be zero-dim, for example in the case of forecast one step in tsa
if squeezed.ndim < 2:
return Series(squeezed, index=self.row_labels[-len(result):])
else:
return DataFrame(result, index=self.row_labels[-len(result):],
columns=self.ynames)
def attach_dates(self, result):
squeezed = result.squeeze()
# May be zero-dim, for example in the case of forecast one step in tsa
if squeezed.ndim < 2:
return TimeSeries(squeezed, index=self.predict_dates)
else:
return DataFrame(result, index=self.predict_dates,
columns=self.ynames)
def attach_ynames(self, result):
squeezed = result.squeeze()
# May be zero-dim, for example in the case of forecast one step in tsa
if squeezed.ndim < 2:
return TimeSeries(squeezed, name=self.ynames)
else:
return DataFrame(result, columns=self.ynames)
def _make_endog_names(endog):
if endog.ndim == 1 or endog.shape[1] == 1:
ynames = ['y']
else: # for VAR
ynames = ['y%d' % (i+1) for i in range(endog.shape[1])]
return ynames
def _make_exog_names(exog):
exog_var = exog.var(0)
if (exog_var == 0).any():
# assumes one constant in first or last position
# avoid exception if more than one constant
const_idx = exog_var.argmin()
exog_names = ['x%d' % i for i in range(1, exog.shape[1])]
exog_names.insert(const_idx, 'const')
else:
exog_names = ['x%d' % i for i in range(1, exog.shape[1]+1)]
return exog_names
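# Illustrative example (not from the original source): for a 3-column exog whose
# first column is constant, _make_exog_names returns ['const', 'x1', 'x2'];
# with no constant column it returns ['x1', 'x2', 'x3'].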
def handle_missing(endog, exog=None, missing='none', **kwargs):
klass = handle_data_class_factory(endog, exog)
if missing == 'none':
ret_dict = dict(endog=endog, exog=exog)
ret_dict.update(kwargs)
return ret_dict, None
return klass.handle_missing(endog, exog, missing=missing, **kwargs)
def handle_data_class_factory(endog, exog):
"""
Given inputs
"""
if data_util._is_using_ndarray_type(endog, exog):
klass = ModelData
elif data_util._is_using_pandas(endog, exog):
klass = PandasData
elif data_util._is_using_patsy(endog, exog):
klass = PatsyData
# keep this check last
elif data_util._is_using_ndarray(endog, exog):
klass = ModelData
else:
raise ValueError('unrecognized data structures: %s / %s' %
(type(endog), type(exog)))
return klass
def handle_data(endog, exog, missing='none', hasconst=None, **kwargs):
# deal with lists and tuples up-front
if isinstance(endog, (list, tuple)):
endog = np.asarray(endog)
if isinstance(exog, (list, tuple)):
exog = np.asarray(exog)
klass = handle_data_class_factory(endog, exog)
return klass(endog, exog=exog, missing=missing, hasconst=hasconst,
**kwargs)
|
bsd-3-clause
|
hsiaoyi0504/scikit-learn
|
sklearn/covariance/tests/test_graph_lasso.py
|
272
|
5245
|
""" Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
|
bsd-3-clause
|
MechCoder/sympy
|
sympy/plotting/tests/test_plot_implicit.py
|
52
|
2912
|
import warnings
from sympy import (plot_implicit, cos, Symbol, symbols, Eq, sin, re, And, Or, exp, I,
tan, pi)
from sympy.plotting.plot import unset_show
from tempfile import NamedTemporaryFile
from sympy.utilities.pytest import skip
from sympy.external import import_module
#Set plots not to show
unset_show()
def tmp_file(name=''):
return NamedTemporaryFile(suffix='.png').name
def plot_and_save(name):
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
#implicit plot tests
plot_implicit(Eq(y, cos(x)), (x, -5, 5), (y, -2, 2)).save(tmp_file(name))
plot_implicit(Eq(y**2, x**3 - x), (x, -5, 5),
(y, -4, 4)).save(tmp_file(name))
plot_implicit(y > 1 / x, (x, -5, 5),
(y, -2, 2)).save(tmp_file(name))
plot_implicit(y < 1 / tan(x), (x, -5, 5),
(y, -2, 2)).save(tmp_file(name))
plot_implicit(y >= 2 * sin(x) * cos(x), (x, -5, 5),
(y, -2, 2)).save(tmp_file(name))
plot_implicit(y <= x**2, (x, -3, 3),
(y, -1, 5)).save(tmp_file(name))
#Test all input args for plot_implicit
plot_implicit(Eq(y**2, x**3 - x)).save(tmp_file())
plot_implicit(Eq(y**2, x**3 - x), adaptive=False).save(tmp_file())
plot_implicit(Eq(y**2, x**3 - x), adaptive=False, points=500).save(tmp_file())
plot_implicit(y > x, (x, -5, 5)).save(tmp_file())
plot_implicit(And(y > exp(x), y > x + 2)).save(tmp_file())
plot_implicit(Or(y > x, y > -x)).save(tmp_file())
plot_implicit(x**2 - 1, (x, -5, 5)).save(tmp_file())
plot_implicit(x**2 - 1).save(tmp_file())
plot_implicit(y > x, depth=-5).save(tmp_file())
plot_implicit(y > x, depth=5).save(tmp_file())
plot_implicit(y > cos(x), adaptive=False).save(tmp_file())
plot_implicit(y < cos(x), adaptive=False).save(tmp_file())
plot_implicit(And(y > cos(x), Or(y > x, Eq(y, x)))).save(tmp_file())
plot_implicit(y - cos(pi / x)).save(tmp_file())
#Test plots which cannot be rendered using the adaptive algorithm
#TODO: catch the warning.
plot_implicit(Eq(y, re(cos(x) + I*sin(x)))).save(tmp_file(name))
with warnings.catch_warnings(record=True) as w:
plot_implicit(x**2 - 1, legend='An implicit plot').save(tmp_file())
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert 'No labeled objects found' in str(w[0].message)
def test_line_color():
x, y = symbols('x, y')
p = plot_implicit(x**2 + y**2 - 1, line_color="green", show=False)
assert p._series[0].line_color == "green"
p = plot_implicit(x**2 + y**2 - 1, line_color='r', show=False)
assert p._series[0].line_color == "r"
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
plot_and_save('test')
test_line_color()
else:
skip("Matplotlib not the default backend")
|
bsd-3-clause
|
hainm/scikit-learn
|
examples/linear_model/plot_logistic_path.py
|
349
|
1195
|
#!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
dcolombo/FilFinder
|
fil_finder/filfind_class.py
|
1
|
62866
|
# Licensed under an MIT open source license - see LICENSE
from cores import *
from length import *
from pixel_ident import *
from utilities import *
from width import *
from rollinghough import rht
from analysis import Analysis
import numpy as np
import matplotlib.pyplot as p
import scipy.ndimage as nd
from scipy.stats import lognorm
from scipy.ndimage import distance_transform_edt
from skimage.filters import threshold_adaptive
from skimage.morphology import remove_small_objects, medial_axis
from scipy.stats import scoreatpercentile
from astropy.io import fits
from astropy.table import Table
import astropy.units as u
from copy import deepcopy
import os
import time
import warnings
class fil_finder_2D(object):
"""
This class acts as an overall wrapper to run the fil-finder algorithm
on 2D images and contains visualization and saving capabilities.
Parameters
    ----------
image : numpy.ndarray
A 2D array of the data to be analyzed.
hdr : dictionary
The header from fits file containing the data.
beamwidth : float
The FWHM beamwidth (in arcseconds) of the instrument used to
take the data.
skel_thresh : float, optional
        Given in pixel units. Below this cutoff, skeletons with fewer pixels
will be deleted. The default value is 0.3 pc converted to pixels.
branch_thresh : float, optional
Any branches shorter than this length (in pixels) will be labeled as
extraneous and pruned off. The default value is 3 times the FWHM
beamwidth.
pad_size : int, optional
The size of the pad (in pixels) used to pad the individual
filament arrays. The default is set to 10 pixels. The amount of
        padding can affect the extent of the radial intensity profile as well
as ensuring that useful data is not cut off during adaptive
thresholding.
flatten_thresh : int, optional
The percentile of the data (0-100) to set the normalization of the arctan
transform. By default, a log-normal distribution is fit and the
threshold is set to :math:`\mu + 2\sigma`. If the data contains regions
        of much higher intensity than the mean, it is recommended that this
        be set above the 95th percentile.
smooth_size : int, optional
The patch size (in pixels) used to smooth the flatten image before
adaptive thresholding is performed. Smoothing is necessary to ensure
        the extraneous branches on the skeletons are minimized.
If None, the patch size is set to ~0.05 pc. This ensures the large
scale structure is not affected while smoothing extraneous pixels off
the edges.
size_thresh : int, optional
This sets the lower threshold on the size of objects found in the
adaptive thresholding. If None, the value is set at
        :math:`5\pi (0.1 \text{pc})^2` which is the area of the minimum dimensions
expected for a filament. Any region smaller than this threshold may be
safely labeled as an artifact of the thresholding.
glob_thresh : float, optional
This is the percentile of the data to mask off. All intensities below
are cut off from being included in the filamentary structure.
adapt_thresh : int, optional
This is the size of the patch used in the adaptive thresholding.
Bright structure is not very sensitive to the choice of patch size,
but faint structure is very sensitive. If None, the patch size is set
to twice the width of a typical filament (~0.2 pc). As the width of
filaments is somewhat ubiquitous, this patch size generally segments
all filamentary structure in a given image.
distance : float, optional
The distance to the region being examined (in pc). If None, the
analysis is carried out in pixel and angular units. In this case,
        the physical priors used for the other optional parameters are
        meaningless and each must be specified at initialization.
region_slice : list, optional
This gives the option to examine a specific region in the given image.
        The expected input is [xmin, xmax, ymin, ymax].
mask : numpy.ndarray, optional
A pre-made, boolean mask may be supplied to skip the segmentation
process. The algorithm will skeletonize and run the analysis portions
only.
freq : float, optional
**NOT FULLY SUPPORTED IN THIS RELEASE**
Frequency of the image. This is required for using the cylindrical
model (cyl_model) for the widths.
save_name : str, optional
Sets the prefix name that is used for output files. Can be overridden
in ``save_fits`` and ``save_table``. Default is "FilFinder_output".
Examples
--------
>>> from fil_finder import fil_finder_2D
>>> from astropy.io.fits import getdata
>>> img,hdr = getdata("/srv/astro/erickoch/gould_belt/chamaeleonI-250.fits", header=True)
>>> filfind = fil_finder_2D(img, hdr, 15.1, distance=170,
region_slice=[620,1400,430,1700], save_name='chamaeleonI-250')
>>> filfind.run(verbose=False)
"""
def __init__(self, image, hdr, beamwidth, skel_thresh=None,
branch_thresh=None, pad_size=10, flatten_thresh=None,
smooth_size=None, size_thresh=None, glob_thresh=None,
adapt_thresh=None, distance=None, region_slice=None,
mask=None, freq=None, save_name="FilFinder_output"):
img_dim = len(image.shape)
if img_dim < 2 or img_dim > 2:
raise TypeError(
"Image must be 2D array. Input array has %s dimensions."
% (img_dim))
if region_slice is None:
self.image = image
else:
slices = (slice(region_slice[0], region_slice[1], None),
slice(region_slice[2], region_slice[3], None))
self.image = np.pad(image[slices], 1, padwithzeros)
self.header = hdr
self.skel_thresh = skel_thresh
self.branch_thresh = branch_thresh
self.pad_size = pad_size
self.freq = freq
self.save_name = save_name
# If pre-made mask is provided, remove nans if any.
self.mask = None
if mask is not None:
mask[np.isnan(mask)] = 0.0
self.mask = mask
# Pad the image by the pad size. Avoids slicing difficulties
# later on.
self.image = np.pad(self.image, self.pad_size, padwithnans)
# Make flattened image
if flatten_thresh is None:
# Fit to a log-normal
fit_vals = lognorm.fit(self.image[~np.isnan(self.image)])
median = lognorm.median(*fit_vals)
std = lognorm.std(*fit_vals)
thresh_val = median + 2*std
else:
thresh_val = scoreatpercentile(self.image[~np.isnan(self.image)],
flatten_thresh)
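# The arctan transform compresses intensities well above thresh_val toward
# pi/2, flattening bright compact sources so they do not dominate the
# adaptive thresholding performed later.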
self.flat_img = np.arctan(self.image / thresh_val)
if distance is None:
print "No distance given. Results will be in pixel units."
self.imgscale = 1.0 # pixel
# where CDELT2 is in degrees
self.beamwidth = beamwidth * (hdr["CDELT2"] * 3600) ** (-1)
self.pixel_unit_flag = True
else:
self.imgscale = (hdr['CDELT2'] * (np.pi / 180.0) * distance) # pc
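# Convert the beam FWHM (assumed to be in arcsec) to a Gaussian width in
# pc: divide by sqrt(8 ln 2) to go from FWHM to sigma, multiply by
# 1/206265 to convert arcsec to radians, then scale by the distance.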
self.beamwidth = (
beamwidth / np.sqrt(8 * np.log(2.))) * \
(1 / 206265.) * distance
self.pixel_unit_flag = False
# Angular conversion (sr/pixel^2)
self.angular_scale = ((hdr['CDELT2'] * u.degree) ** 2.).to(u.sr).value
self.glob_thresh = glob_thresh
self.adapt_thresh = adapt_thresh
self.smooth_size = smooth_size
self.size_thresh = size_thresh
self.width_fits = {"Parameters": [], "Errors": [], "Names": None}
self.rht_curvature = {"Median": [], "IQR": []}
self.filament_arrays = {}
def create_mask(self, glob_thresh=None, adapt_thresh=None,
smooth_size=None, size_thresh=None, verbose=False,
test_mode=False, regrid=True, border_masking=True,
zero_border=False, fill_hole_size=None,
use_existing_mask=False, save_png=False):
'''
This runs the complete segmentation process and returns a mask of the
filaments found. The process is broken into several steps:
* An arctan transform is taken to flatten extremely bright regions.
Adaptive thresholding is very sensitive to local intensity changes
and small, bright objects (i.e. cores) will leave patch-sized holes
in the mask.
* The flattened image is smoothed over with a median filter.
The size of the patch used here is set to be much smaller than the
typical filament width. Smoothing is necessary to minimize
extraneous branches when the medial axis transform is taken.
* A binary opening is performed using an 8-connected structure
element. This is very successful at removing small regions around
the edge of the data.
* Objects smaller than a certain threshold (set to be ~1/10 the area
of a small filament) are removed to ensure only regions which are
sufficiently large enough to be real structure remain.
The parameters for this function are as previously defined.
They are included here for fine-tuning purposes only.
Parameters
----------
smooth_size : int, optional
See definition in ``fil_finder_2D`` inputs.
size_thresh : int, optional
See definition in ``fil_finder_2D`` inputs.
glob_thresh : float, optional
See definition in ``fil_finder_2D`` inputs.
adapt_thresh : int, optional
See definition in ``fil_finder_2D`` inputs.
verbose : bool, optional
Enables plotting. Default is False.
test_mode : bool, optional
Plot each masking step. Default is False.
regrid : bool, optional
Enables the regridding of the image to larger sizes when the patch
size for the adaptive thresholding is less than 40 pixels. This
decreases fragmentation of regions due to pixellization effects.
Default is True.
border_masking : bool, optional
Dilates a mask of the regions along the edge of the image to remove
regions dominated by noise. Disabling this leads to regions being
characterized at the image boundaries, and should only be done if there
is no significant noise at the edges. Default is True.
zero_border : bool, optional
Replaces the NaN border with zeros for the adaptive thresholding.
This is useful when emission continues to the edge of the image.
Default is False.
fill_hole_size : int or float, optional
Sets the maximum hole size to fill in the skeletons. If <1,
maximum is that proportion of the total number of pixels in
skeleton. Otherwise, it sets the maximum number of pixels.
Defaults to a square area with length of the beamwidth.
use_existing_mask : bool, optional
If ``mask`` is already specified, enabling this skips
recomputing the mask.
save_png : bool, optional
Saves the plot made in verbose mode. Disabled by default.
Attributes
----------
mask : numpy.ndarray
The mask of filaments.
'''
if self.mask is not None and use_existing_mask:
warnings.warn("Using inputted mask. Skipping creation of a new mask.")
return self # Skip if pre-made mask given
if glob_thresh is not None:
self.glob_thresh = glob_thresh
if adapt_thresh is not None:
self.adapt_thresh = adapt_thresh
if smooth_size is not None:
self.smooth_size = smooth_size
if size_thresh is not None:
self.size_thresh = size_thresh
if self.pixel_unit_flag:
if smooth_size is None:
raise ValueError("Distance not given. Must specify smooth_size"
" in pixel units.")
if adapt_thresh is None:
raise ValueError("Distance not given. Must specify adapt_thresh"
" in pixel units.")
if size_thresh is None:
raise ValueError("Distance not given. Must specify size_thresh"
" in pixel units.")
if self.size_thresh is None:
if self.beamwidth == 0.0:
warnings.warn("Beam width is set to 0.0."
"The size threshold is then 0. It is recommended"
"that size_thresh is manually set.")
self.size_thresh = round(
np.pi * 5 * (0.1)**2. * self.imgscale ** -2)
# Area of ellipse for typical filament size. Divided by 10 to
# incorporate sparsity.
if self.adapt_thresh is None:
# twice average FWHM for filaments
self.adapt_thresh = round(0.2 / self.imgscale)
if self.smooth_size is None:
# half average FWHM for filaments
self.smooth_size = round(0.05 / self.imgscale)
# Check if regridding is even necessary
if self.adapt_thresh >= 40 and regrid:
regrid = False
warnings.warn("Adaptive thresholding patch is larger than 40"
"pixels. Regridding has been disabled.")
# Adaptive thresholding can't handle nans, so we create a nan mask
# by finding the large, outer regions, smoothing with a large median
# filter and eroding it.
# Make a copy of the flattened image
flat_copy = self.flat_img.copy()
# Make the nan mask
if border_masking:
nan_mask = np.isnan(flat_copy)
nan_mask = remove_small_objects(nan_mask, min_size=50,
connectivity=8)
nan_mask = np.logical_not(nan_mask)
nan_mask = nd.median_filter(nan_mask, 25)
nan_mask = nd.binary_erosion(nan_mask, eight_con(),
iterations=15)
else:
nan_mask = np.logical_not(np.isnan(flat_copy))
# Perform regridding
if regrid:
# Remove nans in the copy
flat_copy[np.isnan(flat_copy)] = 0.0
# Calculate the needed zoom to make the patch size ~40 pixels
ratio = 40 / self.adapt_thresh
# Round to the nearest factor of 2
regrid_factor = np.min([2., int(round(ratio/2.0)*2.0)])
# Defaults to cubic interpolation
masking_img = nd.zoom(flat_copy, (regrid_factor, regrid_factor))
else:
regrid_factor = 1
ratio = 1
masking_img = flat_copy
smooth_img = nd.median_filter(masking_img,
size=round(self.smooth_size*ratio))
# Set the border to zeros for the adaptive thresholding. Avoid border
# effects.
if zero_border:
smooth_img[:self.pad_size*ratio+1, :] = 0.0
smooth_img[-self.pad_size*ratio-1:, :] = 0.0
smooth_img[:, :self.pad_size*ratio+1] = 0.0
smooth_img[:, -self.pad_size*ratio-1:] = 0.0
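# Adaptive thresholding compares each pixel to the mean of its local
# patch (side ~ adapt_thresh, scaled by the regridding ratio), so the
# mask follows local background variations.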
adapt = threshold_adaptive(smooth_img,
round(ratio * self.adapt_thresh),
method="mean")
if regrid:
regrid_factor = float(regrid_factor)
adapt = nd.zoom(adapt, (1/regrid_factor, 1/regrid_factor), order=0)
# Remove areas near the image border
adapt = adapt * nan_mask
if self.glob_thresh is not None:
thresh_value = \
np.max([0.0,
scoreatpercentile(self.flat_img[~np.isnan(self.flat_img)],
self.glob_thresh)])
glob = flat_copy > thresh_value
adapt = glob * adapt
cleaned = \
remove_small_objects(adapt, min_size=self.size_thresh)
# Remove small holes within the object
if fill_hole_size is None:
fill_hole_size = np.pi*(self.beamwidth/self.imgscale)**2
mask_objs, num, corners = \
isolateregions(cleaned, fill_hole=True, rel_size=fill_hole_size,
morph_smooth=True)
self.mask = recombine_skeletons(mask_objs,
corners, self.image.shape,
self.pad_size, verbose=True)
# WARNING!! Setting some image values to 0 to avoid negative weights.
# This may cause issues, however it will allow for proper skeletons
# Through all the testing and deriving science results, this has not
# been an issue! EK
self.image[np.where((self.mask * self.image) < 0.0)] = 0
if test_mode:
p.imshow(np.log10(self.image), origin="lower", interpolation=None,
cmap='binary')
p.colorbar()
p.show()
p.imshow(masking_img, origin="lower", interpolation=None,
cmap='binary')
p.colorbar()
p.show()
p.imshow(smooth_img, origin="lower", interpolation=None,
cmap='binary')
p.colorbar()
p.show()
p.imshow(adapt, origin="lower", interpolation=None,
cmap='binary')
p.show()
p.imshow(cleaned, origin="lower", interpolation=None,
cmap='binary')
p.show()
p.imshow(self.mask, origin="lower", interpolation=None,
cmap='binary')
p.show()
if verbose or save_png:
vmin = np.percentile(self.flat_img[np.isfinite(self.flat_img)], 20)
vmax = np.percentile(self.flat_img[np.isfinite(self.flat_img)], 90)
p.imshow(self.flat_img, interpolation=None, origin="lower",
cmap='binary', vmin=vmin, vmax=vmax)
p.contour(self.mask, colors="r")
p.title("Mask on Flattened Image.")
if save_png:
try_mkdir(self.save_name)
p.savefig(os.path.join(self.save_name, self.save_name+"_mask.png"))
if verbose:
p.show()
p.clf()
return self
def medskel(self, return_distance=True, verbose=False, save_png=False):
'''
This function performs the medial axis transform (skeletonization)
on the mask. This is essentially a wrapper function of
skimage.morphology.medial_axis with the ability to delete narrow
regions in the mask.
If the distance transform is returned from the transform, it is used
as a pruning step. Regions where the width of the region is far too
small (set to >0.01 pc) are deleted. This ensures there are no unnecessary
connections between filaments.
Parameters
----------
return_distance : bool, optional
This sets whether the distance transform is returned from
skimage.morphology.medial_axis.
verbose : bool, optional
Enables plotting.
save_png : bool, optional
Saves the plot made in verbose mode. Disabled by default.
Attributes
----------
skeleton : numpy.ndarray
The array containing all of the skeletons.
medial_axis_distance : numpy.ndarray
The distance transform used to create the skeletons.
'''
if return_distance:
self.skeleton, self.medial_axis_distance = medial_axis(
self.mask, return_distance=return_distance)
self.medial_axis_distance = self.medial_axis_distance * \
self.skeleton
# Delete connection smaller than 2 pixels wide. Such a small
# connection is more likely to be from limited pixel resolution
# rather than actual structure.
width_threshold = 1
narrow_pts = np.where(self.medial_axis_distance < width_threshold)
self.skeleton[narrow_pts] = 0 # Eliminate narrow connections
self.medial_axis_distance[narrow_pts] = 0
else:
self.skeleton = medial_axis(self.mask)
self.medial_axis_skeleton = None
if verbose or save_png: # For examining results of skeleton
vmin = np.percentile(self.flat_img[np.isfinite(self.flat_img)], 20)
vmax = np.percentile(self.flat_img[np.isfinite(self.flat_img)], 90)
p.imshow(self.flat_img, interpolation=None, origin="lower",
cmap='binary', vmin=vmin, vmax=vmax)
p.contour(self.skeleton, colors="r")
if save_png:
try_mkdir(self.save_name)
p.savefig(os.path.join(self.save_name,
self.save_name+"_initial_skeletons.png"))
if verbose:
p.show()
p.clf()
return self
def analyze_skeletons(self, relintens_thresh=0.2, nbeam_lengths=5,
branch_nbeam_lengths=3,
skel_thresh=None, branch_thresh=None,
verbose=False, save_png=False):
'''
This function wraps most of the skeleton analysis. Several steps are
completed here:
* isolatefilaments is run to separate each skeleton into its own
array. If the skeletons are under the threshold set by
self.size_thresh, the region is removed. An updated mask is
also returned.
* pix_identify classifies each of the pixels in a skeleton as a
body, end, or intersection point. See the documentation on find_filpix
for a complete explanation. The function labels the branches and
intersections of each skeleton.
* init_lengths finds the length of each branch in each skeleton and
also returns the coordinates of each of these branches for use in
the graph representation.
* pre_graph turns the skeleton structures into a graphing format
compatible with networkx. Hubs in the graph are the intersections
and end points, labeled as letters and numbers respectively. Edges
define the connectivity of the hubs and they are weighted by their
length.
* longest_path utilizes networkx.shortest_path_length to find the
overall length of each of the filaments. The returned path is the
longest path through the skeleton. If loops exist in the skeleton,
the longest path is chosen (this shortest path algorithm fails when
used on loops).
* extremum_pts returns the locations of the longest path's extent
so its performance can be evaluated.
* final_lengths takes the path returned from longest_path and
calculates the overall length of the filament. This step also acts
to prune the skeletons.
* final_analysis combines the outputs and returns the results for
further analysis.
Parameters
----------
verbose : bool, optional
Enables plotting.
relintens_thresh : float, optional
Relative intensity threshold for pruning. Sets the importance
a branch must have in intensity relative to all other branches
in the skeleton. Must be between (0.0, 1.0].
nbeam_lengths : float or int, optional
Sets the minimum skeleton length based on the number of beam
sizes specified.
branch_nbeam_lengths : float or int, optional
Sets the minimum branch length based on the number of beam
sizes specified.
skel_thresh : float, optional
Manually set the minimum skeleton threshold. Overrides all
previous settings.
branch_thresh : float, optional
Manually set the minimum branch length threshold. Overrides all
previous settings.
save_png : bool, optional
Saves the plot made in verbose mode. Disabled by default.
Attributes
----------
filament_arrays : list of numpy.ndarray
Contains individual arrays of each skeleton
number_of_filaments : int
The number of individual filaments.
array_offsets : list
A list of coordinates for each filament array. This will
be used to recombine the final skeletons into one array.
filament_extents : list
This contains the coordinates of the initial and final
position of the skeleton's extent. It may be used to
test the performance of the shortest path algorithm.
lengths : list
Contains the overall lengths of the skeletons.
labeled_fil_arrays : list of numpy.ndarray
Contains the final labeled versions of the skeletons.
branch_properties : dict
The significant branches of the skeletons have their length
and number of branches in each skeleton stored here.
The keys include *length*, *intensity*, and *number*.
'''
if relintens_thresh > 1.0 or relintens_thresh <= 0.0:
raise ValueError(
"relintens_thresh must be set between (0.0, 1.0].")
if self.pixel_unit_flag:
if self.skel_thresh is None and skel_thresh is None:
raise ValueError("Distance not given. Must specify skel_thresh"
" in pixel units.")
# Set the skeleton length threshold to some factor of the beam width
if self.skel_thresh is None:
self.skel_thresh = round(0.3 / self.imgscale)
# round( self.beamwidth * nbeam_lengths / self.imgscale)
elif skel_thresh is not None:
self.skel_thresh = skel_thresh
# Set the minimum branch length to be the beam size.
if self.branch_thresh is None:
self.branch_thresh = \
round(branch_nbeam_lengths * self.beamwidth / self.imgscale)
elif branch_thresh is not None:
self.branch_thresh = branch_thresh
isolated_filaments, num, offsets = \
isolateregions(self.skeleton, size_threshold=self.skel_thresh,
pad_size=self.pad_size)
self.number_of_filaments = num
self.array_offsets = offsets
interpts, hubs, ends, filbranches, labeled_fil_arrays = \
pix_identify(isolated_filaments, num)
self.branch_properties = init_lengths(
labeled_fil_arrays, filbranches, self.array_offsets, self.image)
# Add the number of branches onto the dictionary
self.branch_properties["number"] = filbranches
edge_list, nodes = pre_graph(
labeled_fil_arrays, self.branch_properties, interpts, ends)
max_path, extremum, G = \
longest_path(edge_list, nodes,
verbose=verbose,
save_png=save_png,
save_name=self.save_name,
skeleton_arrays=labeled_fil_arrays,
lengths=self.branch_properties["length"])
updated_lists = \
prune_graph(G, nodes, edge_list, max_path, labeled_fil_arrays,
self.branch_properties, self.branch_thresh,
relintens_thresh=relintens_thresh)
labeled_fil_arrays, edge_list, nodes, self.branch_properties = \
updated_lists
self.filament_extents = extremum_pts(
labeled_fil_arrays, extremum, ends)
length_output = main_length(max_path, edge_list, labeled_fil_arrays,
interpts, self.branch_properties[
"length"], self.imgscale,
verbose=verbose, save_png=save_png,
save_name=self.save_name)
self.lengths, self.filament_arrays["long path"] = length_output
# Convert lengths to numpy array
self.lengths = np.asarray(self.lengths)
self.filament_arrays["final"] =\
make_final_skeletons(labeled_fil_arrays, interpts,
verbose=verbose, save_png=save_png,
save_name=self.save_name)
self.labelled_filament_arrays = labeled_fil_arrays
# Convert branch lengths physical units
for n in range(self.number_of_filaments):
lengths = self.branch_properties["length"][n]
self.branch_properties["length"][n] = [
self.imgscale * length for length in lengths]
self.skeleton = \
recombine_skeletons(self.filament_arrays["final"],
self.array_offsets, self.image.shape,
self.pad_size, verbose=True)
self.skeleton_longpath = \
recombine_skeletons(self.filament_arrays["long path"],
self.array_offsets, self.image.shape,
self.pad_size, verbose=True)
return self
def exec_rht(self, radius=10, ntheta=180, background_percentile=25,
branches=False, min_branch_length=3, verbose=False,
save_png=False):
'''
Implements the Rolling Hough Transform (Clark et al., 2014).
The orientation of each filament is denoted by the mean value of the
RHT, which from directional statistics can be defined as:
:math:`\\langle\\theta \\rangle = \\frac{1}{2} \\tan^{-1}\\left(\\frac{\\Sigma_i w_i\\sin2\\theta_i}{\\Sigma_i w_i\\cos2\\theta_i}\\right)`
where :math:`w_i` is the normalized value of the RHT at
:math:`\\theta_i`. This definition assumes that :math:`\\Sigma_iw_i=1`.
:math:`\\theta` is defined on :math:`\\left[-\\pi/2, \\pi/2\\right)`.
"Curvature" is represented by the IQR confidence interval about the mean,
:math:`\\langle\\theta \\rangle \\pm \\sin^{-1} \\left( u_{\\alpha} \\sqrt{ \\frac{1-\\alpha}{2R^2} } \\right)`
where :math:`u_{\\alpha}` is the z-score of the two-tail probability,
:math:`\\alpha=\\Sigma_i\\cos{\\left[2w_i\\left(\\theta_i-\\langle\\theta\\rangle\\right)\\right]}`
is the estimated weighted second trigonometric moment and
:math:`R^2=\\left[\\left(\\Sigma_iw_i\\sin{\\theta_i}\\right)^2 +\\left(\\Sigma_iw_i\\cos{\\theta_i}\\right)^2\\right]`
is the weighted length of the vector.
These equations can be found in Fisher & Lewis (1983).
Parameters
----------
radius : int
Sets the patch size that the RHT uses.
ntheta : int, optional
The number of bins to use for the RHT.
background_percentile : int, optional
RHT distribution often has a constant background. This sets the
percentile to subtract off.
branches : bool, optional
If enabled, runs the RHT on individual branches in the skeleton.
min_branch_length : int, optional
Sets the minimum number of pixels a branch must have for the RHT to be computed.
verbose : bool, optional
Enables plotting.
save_png : bool, optional
Saves the plot made in verbose mode. Disabled by default.
Attributes
----------
rht_curvature : dict
Contains the median and IQR for each filament.
References
----------
`Clark et al. (2014) <http://adsabs.harvard.edu/abs/2014ApJ...789...82C>`_
`Fisher & Lewis (1983) <http://biomet.oxfordjournals.org/content/70/2/333.short>`_
'''
if not self.rht_curvature["Median"]:
pass
else:
self.rht_curvature = {"Median": [], "IQR": []}
# Flag branch output
self._rht_branches_flag = False
if branches:
self._rht_branches_flag = True
# Set up new dict entries.
self.rht_curvature["Intensity"] = []
self.rht_curvature["Length"] = []
for n in range(self.number_of_filaments):
# Need to correct for how image is read in
# fliplr aligns angles with image when shown in ds9
if branches:
# We need intermediary arrays now
medians = np.array([])
iqrs = np.array([])
intensity = np.array([])
lengths = np.array([])
# See the fliplr comment above
skel_arr = np.fliplr(self.filament_arrays["final"][n])
# Return the labeled skeleton without intersections
output = \
pix_identify([skel_arr], 1)[-2:]
labeled_fil_array = output[1]
filbranch = output[0]
branch_properties = init_lengths(labeled_fil_array,
filbranch,
[self.array_offsets[n]],
self.image)
labeled_fil_array = labeled_fil_array[0]
filbranch = filbranch[0]
# Return the labeled skeleton without intersections
labeled_fil_array = pix_identify([skel_arr], 1)[-1][0]
branch_labels = \
np.unique(labeled_fil_array[np.nonzero(labeled_fil_array)])
for val in branch_labels:
length = branch_properties["length"][0][val-1]
# Only include branches at least min_branch_length pixels long
if length < min_branch_length:
continue
theta, R, quantiles = \
rht(labeled_fil_array == val,
radius, ntheta, background_percentile)
twofive, median, sevenfive = quantiles
medians = np.append(medians, median)
if sevenfive > twofive:
iqrs = \
np.append(iqrs,
np.abs(sevenfive - twofive))
else:  # angles wrap around the +/- pi/2 boundary
iqrs = \
np.append(iqrs,
np.abs(sevenfive - twofive + np.pi))
intensity = np.append(intensity, branch_properties["intensity"][0][val-1])
lengths = np.append(lengths, branch_properties["length"][0][val-1])
self.rht_curvature["Median"].append(medians)
self.rht_curvature["IQR"].append(iqrs)
self.rht_curvature["Intensity"].append(intensity)
self.rht_curvature["Length"].append(lengths)
if verbose or save_png:
Warning("No verbose mode available when running RHT on individual"
" branches. No plots will be saved.")
else:
skel_arr = np.fliplr(self.filament_arrays["long path"][n])
theta, R, quantiles = rht(
skel_arr, radius, ntheta, background_percentile)
twofive, median, sevenfive = quantiles
self.rht_curvature["Median"].append(median)
if sevenfive > twofive:
self.rht_curvature["IQR"].append(
np.abs(sevenfive - twofive)) # Interquartile range
else:  # angles wrap around the +/- pi/2 boundary
self.rht_curvature["IQR"].append(
np.abs(sevenfive - twofive + np.pi))
if verbose or save_png:
ax1 = p.subplot(121, polar=True)
ax1.plot(2 * theta, R / R.max(), "kD")
ax1.fill_between(2 * theta, 0,
R[:, 0] / R.max(),
facecolor="blue",
interpolate=True, alpha=0.5)
ax1.set_rmax(1.0)
ax1.plot([2 * median] * 2, np.linspace(0.0, 1.0, 2), "g")
ax1.plot([2 * twofive] * 2, np.linspace(0.0, 1.0, 2),
"b--")
ax1.plot([2 * sevenfive] * 2, np.linspace(0.0, 1.0, 2),
"b--")
p.subplot(122)
p.imshow(self.filament_arrays["long path"][n],
cmap="binary", origin="lower")
if save_png:
try_mkdir(self.save_name)
p.savefig(os.path.join(self.save_name,
self.save_name+"_rht_"+str(n)+".png"))
if verbose:
p.show()
p.clf()
return self
def find_widths(self, fit_model=gauss_model, try_nonparam=True,
verbose=False, save_png=False):
'''
The final step of the algorithm is to find the widths of each
of the skeletons. We do this by:
* A Euclidean Distance Transform is performed on each skeleton.
The skeletons are also recombined onto a single array. The
individual filament arrays are padded to ensure a proper radial
profile is created. If the padded arrays fall outside of the
original image, they are trimmed.
* A user-specified model is fit to each of the radial profiles.
There are three models included in this package; a gaussian,
lorentzian and a cylindrical filament model
(Arzoumanian et al., 2011). This returns the width and central
intensity of each filament. The reported widths are the
deconvolved FWHM of the gaussian width. For faint or crowded
filaments, the fit can fail due to lack of data to fit to.
In this case, we estimate the width non-parametrically.
Parameters
----------
fit_model : function
Function to fit to the radial profile.
try_nonparam : bool, optional
If True, uses a non-parametric method to find the properties of
the radial profile in cases where the model fails.
verbose : bool, optional
Enables plotting.
save_png : bool, optional
Saves the plot made in verbose mode. Disabled by default.
Attributes
----------
width_fits : dict
Contains the fit parameters and estimations of the errors
from each fit.
total_intensity : list
Sum of the intensity in each filament within 1 FWHM of the
skeleton.
'''
dist_transform_all, dist_transform_separate = \
dist_transform(self.filament_arrays["final"],
self.skeleton)
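# For each filament, build a radial profile of image intensity versus
# distance from the skeleton, then fit the requested model to it.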
for n in range(self.number_of_filaments):
# Need the unbinned data for the non-parametric fit.
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(self.image, dist_transform_all,
dist_transform_separate[n],
self.array_offsets[n], self.imgscale)
if fit_model == cyl_model:
if self.freq is None:
print('''Image not converted to column density.
Fit parameters will not match physical meaning.
Please specify the frequency.''')
else:
assert isinstance(self.freq, float)
radprof = dens_func(
planck(20., self.freq), 0.2, radprof) * (5.7e19)
fit, fit_error, model, parameter_names, fail_flag = \
fit_model(dist, radprof, weights, self.beamwidth)
# Get the function's name to track where fit values come from
fit_type = str(model.__name__)
if not fail_flag:
chisq = red_chisq(radprof, model(dist, *fit[:-1]), 3, 1)
else:
# Give a value above threshold to try non-parametric fit
chisq = 11.0
# If the model isn't doing a good job, try it non-parametrically
if chisq > 10.0 and try_nonparam:
fit, fit_error, fail_flag = \
nonparam_width(dist, radprof, unbin_dist, unbin_radprof,
self.beamwidth, 5, 99)
# Change the fit type.
fit_type = "nonparam"
if n == 0:
# Prepare the storage
self.width_fits["Parameters"] = np.empty(
(self.number_of_filaments, len(parameter_names)))
self.width_fits["Errors"] = np.empty(
(self.number_of_filaments, len(parameter_names)))
self.width_fits["Type"] = np.empty(
(self.number_of_filaments), dtype="S")
self.total_intensity = np.empty(
(self.number_of_filaments, ))
if verbose or save_png:
if verbose:
print "%s in %s" % (n, self.number_of_filaments)
print "Fit Parameters: %s " % (fit)
print "Fit Errors: %s" % (fit_error)
print "Fit Type: %s" % (fit_type)
p.subplot(121)
p.plot(dist, radprof, "kD")
points = np.linspace(np.min(dist), np.max(dist), 2 * len(dist))
try: # If FWHM is appended on, will get TypeError
p.plot(points, model(points, *fit), "r")
except TypeError:
p.plot(points, model(points, *fit[:-1]), "r")
p.xlabel(r'Radial Distance (pc)')
p.ylabel(r'Intensity')
p.grid(True)
p.subplot(122)
xlow, ylow = (
self.array_offsets[n][0][0], self.array_offsets[n][0][1])
xhigh, yhigh = (
self.array_offsets[n][1][0], self.array_offsets[n][1][1])
shape = (xhigh - xlow, yhigh - ylow)
p.contour(self.filament_arrays["final"][n]
[self.pad_size:shape[0] - self.pad_size,
self.pad_size:shape[1] - self.pad_size], colors="r")
img_slice = self.image[xlow + self.pad_size:xhigh - self.pad_size,
ylow + self.pad_size:yhigh - self.pad_size]
vmin = scoreatpercentile(img_slice[np.isfinite(img_slice)], 10)
p.imshow(img_slice, interpolation=None, vmin=vmin, origin='lower',
cmap='binary')
p.colorbar()
if save_png:
try_mkdir(self.save_name)
p.savefig(os.path.join(self.save_name,
self.save_name+"_width_fit_"+str(n)+".png"))
if verbose:
p.show()
p.clf()
# Final width check -- make sure length is longer than the width.
# If it is, add the width onto the length since the adaptive
# thresholding shortens each edge by about the same amount.
if self.lengths[n] > fit[-1]:
self.lengths[n] += fit[-1]
else:
fail_flag = True
# If fail_flag has been set to true in any of the fitting steps,
# set results to nans
if fail_flag:
fit = [np.NaN] * len(fit)
fit_error = [np.NaN] * len(fit)
# Using the unbinned profiles, we can find the total filament
# brightness. This can later be used to estimate the mass
# contained in each filament.
within_width = np.where(unbin_dist <= fit[1])
if within_width[0].size: # Check if its empty
# Subtract off the estimated background
fil_bright = unbin_radprof[within_width] - fit[2]
sum_bright = np.sum(fil_bright[fil_bright >= 0], axis=None)
self.total_intensity[n] = sum_bright * self.angular_scale
else:
self.total_intensity[n] = np.NaN
self.width_fits["Parameters"][n, :] = fit
self.width_fits["Errors"][n, :] = fit_error
self.width_fits["Type"][n] = fit_type
self.width_fits["Names"] = parameter_names
return self
def compute_filament_brightness(self):
'''
Returns the median brightness along the skeleton of the filament.
Attributes
----------
filament_brightness : list
Median brightness/intensity over the skeleton pixels
for each filament.
'''
self.filament_brightness = []
labels, n = nd.label(self.skeleton, eight_con())
for n in range(1, self.number_of_filaments+1):
values = self.image[np.where(labels == n)]
self.filament_brightness.append(np.median(values))
return self
def filament_model(self, max_radius=25):
'''
Returns a model of the diffuse filamentary network based
on the radial profiles.
Parameters
----------
max_radius : int, optional
Number of pixels to extend profiles to.
Returns
-------
model_image : numpy.ndarray
Array of the model
'''
if len(self.width_fits['Parameters']) == 0:
raise TypeError("Run profile fitting first!")
params = self.width_fits['Parameters']
scale = self.imgscale
# Create the distance transforms
all_fils = dist_transform(self.filament_arrays["final"],
self.skeleton)[0]
model_image = np.zeros(all_fils.shape)
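# Each filament contributes a Gaussian profile about its skeleton:
# amplitude (param[0]) minus background (param[2]), with the fitted
# width (param[1], in pc) converted to pixels via the image scale.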
for param, offset, fil_array in zip(params, self.array_offsets,
self.filament_arrays["final"]):
if np.isnan(param).any():
continue
# Avoid issues with the sizes of each filament array
full_size = np.ones(model_image.shape)
skel_posns = np.where(fil_array >= 1)
full_size[skel_posns[0] + offset[0][0],
skel_posns[1] + offset[0][1]] = 0
dist_array = distance_transform_edt(full_size)
posns = np.where(dist_array < max_radius)
model_image[posns] += \
(param[0] - param[2]) * \
np.exp(-np.power(dist_array[posns], 2) /
(2*(param[1]/scale)**2))
return model_image
def find_covering_fraction(self, max_radius=25):
'''
Compute the fraction of the intensity in the image contained in
the filamentary structure.
Parameters
----------
max_radius : int, optional
Passed to :meth:`filament_model`.
Attributes
----------
covering_fraction : float
Fraction of the total image intensity contained in the
filamentary structure (based on the local, individual fits)
'''
fil_model = self.filament_model(max_radius=max_radius)
self.covering_fraction = np.nansum(fil_model) / np.nansum(self.image)
return self
def save_table(self, table_type="csv", path=None, save_name=None,
save_branch_props=True, branch_table_type="hdf5",
hdf5_path="data"):
'''
The results of the algorithm are saved as an Astropy table in a 'csv',
'fits' or latex format.
Parameters
----------
table_type : str, optional
Sets the output type of the table. Supported options are
"csv", "fits", "latex" and "hdf5".
path : str, optional
The path where the file should be saved.
save_name : str, optional
The prefix for the saved file. If None, the save name specified
when ``fil_finder_2D`` was first called is used.
save_branch_props : bool, optional
When enabled, saves the lists of branch lengths and intensities
in a separate file(s). Default is enabled.
branch_table_type : str, optional
Any of the accepted table_types will work here. If using HDF5,
just one output file is created with each stored within it.
hdf5_path : str, optional
Path name within the HDF5 file.
Attributes
----------
dataframe : astropy.Table
The dataframe is returned for use with the ``Analysis`` class.
'''
if save_name is None:
save_name = self.save_name
save_name = save_name + "_table"
if table_type == "csv":
filename = save_name + ".csv"
elif table_type == "fits":
filename = save_name + ".fits"
elif table_type == "latex":
filename = save_name + ".tex"
elif table_type == "hdf5":
filename = save_name + ".hdf5"
else:
raise NameError("Only formats supported are 'csv', 'fits' \
and 'latex'.")
# If path is specified, append onto filename.
if path is not None:
filename = os.path.join(path, filename)
if not self._rht_branches_flag:
data = {"Lengths": self.lengths,
"Orientation": self.rht_curvature["Median"],
"Curvature": self.rht_curvature["IQR"],
"Branches": self.branch_properties["number"],
"Fit Type": self.width_fits["Type"],
"Total Intensity": self.total_intensity,
"Median Brightness": self.filament_brightness}
branch_data = \
{"Branch Length": self.branch_properties["length"],
"Branch Intensity": self.branch_properties["intensity"]}
else:
# RHT was run on branches, and so can only be saved as a branch
# property due to the table shape
data = {"Lengths": self.lengths,
"Fit Type": self.width_fits["Type"],
"Total Intensity": self.total_intensity,
"Branches": self.branch_properties["number"],
"Median Brightness": self.filament_brightness}
branch_data = \
{"Branch Length": self.rht_curvature["Length"],
"Branch Intensity": self.rht_curvature["Intensity"],
"Curvature": self.rht_curvature["IQR"],
"Orientation": self.rht_curvature["Median"]}
for i, param in enumerate(self.width_fits["Names"]):
data[param] = self.width_fits["Parameters"][:, i]
data[param + " Error"] = self.width_fits["Errors"][:, i]
try_mkdir(self.save_name)
df = Table(data)
if table_type == "csv":
df.write(os.path.join(self.save_name, filename),
format="ascii.csv")
elif table_type == "fits":
df.write(os.path.join(self.save_name, filename))
elif table_type == "latex":
df.write(os.path.join(self.save_name, filename),
format="ascii.latex")
elif table_type == 'hdf5':
df.write(os.path.join(self.save_name, filename),
path=hdf5_path)
self.dataframe = df
for n in range(self.number_of_filaments):
branch_df = \
Table([branch_data[key][n] for key in branch_data.keys()],
names=branch_data.keys())
branch_filename = save_name + "_branch_" + str(n)
if branch_table_type == "csv":
branch_df.write(os.path.join(self.save_name,
branch_filename+".csv"),
format="ascii.csv")
elif branch_table_type == "fits":
branch_df.write(os.path.join(self.save_name,
branch_filename+".fits"))
elif branch_table_type == "latex":
branch_df.write(os.path.join(self.save_name,
branch_filename+".tex"),
format="ascii.latex")
elif branch_table_type == 'hdf5':
hdf_filename = save_name + "_branch"
if n == 0:
branch_df.write(os.path.join(self.save_name,
hdf_filename+".hdf5"),
path="branch_"+str(n))
else:
branch_df.write(os.path.join(self.save_name,
hdf_filename+".hdf5"),
path="branch_"+str(n),
append=True)
return self
def save_fits(self, save_name=None, stamps=False, filename=None,
model_save=True):
'''
This function saves the mask and the skeleton array as FITS files.
Included in the header are the setting used to create them.
Parameters
----------
save_name : str, optional
The prefix for the saved file. If None, the save name specified
when ``fil_finder_2D`` was first called is used.
stamps : bool, optional
Enables saving of individual stamps
filename : str, optional
File name of the image used. If None, assumes save_name is the
file name.
model_save : bool, optional
When enabled, calculates the model using :meth:`filament_model`
and saves it in a FITS file.
'''
if save_name is None:
save_name = self.save_name
if not filename: # Assume save_name is filename if not specified.
filename = save_name
# Create header based off of image header.
new_hdr = deepcopy(self.header)
try: # delete the original history
del new_hdr["HISTORY"]
except KeyError:
pass
try:
new_hdr.update("BUNIT", value="bool", comment="")
except KeyError:
new_hdr["BUNIT"] = ("int", "")
new_hdr["COMMENT"] = "Mask created by fil_finder on " + \
time.strftime("%c")
new_hdr["COMMENT"] = \
"See fil_finder documentation for more info on parameter meanings."
new_hdr["COMMENT"] = "Smoothing Filter Size: " + \
str(self.smooth_size) + " pixels"
new_hdr["COMMENT"] = "Area Threshold: " + \
str(self.size_thresh) + " pixels^2"
new_hdr["COMMENT"] = "Global Intensity Threshold: " + \
str(self.glob_thresh) + " %"
new_hdr["COMMENT"] = "Size of Adaptive Threshold Patch: " + \
str(self.adapt_thresh) + " pixels"
new_hdr["COMMENT"] = "Original file name: " + filename
# Remove padding
mask = self.mask[self.pad_size:-self.pad_size,
self.pad_size:-self.pad_size]
try_mkdir(self.save_name)
# Save mask
fits.writeto(os.path.join(self.save_name,
"".join([save_name, "_mask.fits"])),
mask.astype(">i2"), new_hdr)
# Save skeletons. Includes final skeletons and the longest paths.
try:
new_hdr.update("BUNIT", value="int", comment="")
except KeyError:
new_hdr["BUNIT"] = ("int", "")
new_hdr["COMMENT"] = "Skeleton Size Threshold: " + \
str(self.skel_thresh)
new_hdr["COMMENT"] = "Branch Size Threshold: " + \
str(self.branch_thresh)
hdu_skel = fits.HDUList()
# Final Skeletons - create labels which match up with table output
# Remove padding
skeleton = self.skeleton[self.pad_size:-self.pad_size,
self.pad_size:-self.pad_size]
skeleton_long = self.skeleton_longpath[self.pad_size:-self.pad_size,
self.pad_size:-self.pad_size]
labels = nd.label(skeleton, eight_con())[0]
hdu_skel.append(fits.PrimaryHDU(labels.astype(">i2"), header=new_hdr))
# Longest Paths
labels_lp = nd.label(skeleton_long, eight_con())[0]
hdu_skel.append(fits.PrimaryHDU(labels_lp.astype(">i2"),
header=new_hdr))
try_mkdir(self.save_name)
hdu_skel.writeto(os.path.join(self.save_name,
"".join([save_name, "_skeletons.fits"])))
if stamps:
# Save stamps of all images. Include portion of image and the
# skeleton for reference.
try_mkdir(self.save_name)
# Make a directory for the stamps
out_dir = \
os.path.join(self.save_name, "stamps_" + save_name)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
final_arrays = self.filament_arrays["final"]
longpath_arrays = self.filament_arrays["long path"]
for n, (offset, skel_arr, lp_arr) in \
enumerate(zip(self.array_offsets,
final_arrays,
longpath_arrays)):
xlow, ylow = (offset[0][0], offset[0][1])
xhigh, yhigh = (offset[1][0], offset[1][1])
# Create stamp
img_stamp = self.image[xlow:xhigh,
ylow:yhigh]
# ADD IN SOME HEADERS!
prim_hdr = deepcopy(self.header)
prim_hdr["COMMENT"] = "Outputted from fil_finder."
prim_hdr["COMMENT"] = \
"Extent in original array: (" + \
str(xlow + self.pad_size) + "," + \
str(ylow + self.pad_size) + ")->" + \
"(" + str(xhigh - self.pad_size) + \
"," + str(yhigh - self.pad_size) + ")"
hdu = fits.HDUList()
# Image stamp
hdu.append(fits.PrimaryHDU(img_stamp.astype(">f4"),
header=prim_hdr))
# Stamp of final skeleton
try:
prim_hdr.update("BUNIT", value="bool", comment="")
except KeyError:
prim_hdr["BUNIT"] = ("int", "")
hdu.append(fits.PrimaryHDU(skel_arr.astype(">i2"),
header=prim_hdr))
# Stamp of longest path
hdu.append(fits.PrimaryHDU(lp_arr.astype(">i2"),
header=prim_hdr))
hdu.writeto(os.path.join(out_dir,
save_name+"_object_"+str(n+1)+".fits"))
if model_save:
model = self.filament_model()
# Remove the padding
model = model[self.pad_size:-self.pad_size,
self.pad_size:-self.pad_size]
model_hdr = new_hdr.copy()
try:
model_hdr.update("BUNIT", value=self.header['BUNIT'], comment="")
except KeyError:
Warning("No BUNIT specified in original header.")
model_hdu = fits.PrimaryHDU(model.astype(">f4"), header=model_hdr)
try_mkdir(self.save_name)
model_hdu.writeto(
os.path.join(self.save_name,
"".join([save_name, "_filament_model.fits"])))
return self
def __str__(self):
output = "%s filaments found.\n" % (self.number_of_filaments)
for fil in range(self.number_of_filaments):
output += "Filament: %s, Width: %s, Length: %s, Curvature: %s, " \
"Orientation: %s\n" % \
(fil, self.width_fits["Parameters"][fil, -1],
self.lengths[fil], self.rht_curvature["IQR"][fil],
self.rht_curvature["Median"][fil])
return output
def run(self, verbose=False, save_name=None, save_png=False,
table_type="fits"):
'''
The whole algorithm in one easy step. Individual parameters have not
been included in this batch run. If fine-tuning is needed, it is
recommended to run each step individually.
Parameters
----------
verbose : bool, optional
Enables the verbose option for each of the steps.
save_name : str, optional
The prefix for the saved file.
If None, the name from the header is used.
save_png : bool, optional
Saves the plot made in verbose mode. Disabled by default.
table_type : str, optional
Sets the output type of the table. Supported options are
"csv", "fits" and "latex".
'''
if save_name is None:
save_name = self.save_name
self.create_mask(verbose=verbose, save_png=save_png)
self.medskel(verbose=verbose, save_png=save_png)
self.analyze_skeletons(verbose=verbose, save_png=save_png)
self.exec_rht(verbose=verbose, save_png=save_png)
self.find_widths(verbose=verbose, save_png=save_png)
self.compute_filament_brightness()
self.find_covering_fraction()
self.save_table(save_name=save_name, table_type=table_type)
self.save_fits(save_name=save_name, stamps=False)
if verbose:
print(self)
# if save_plots:
# Analysis(self.dataframe, save_name=save_name).make_hists()
# ImageAnalysis(self.image, self.mask, skeleton=self.skeleton, save_name=save_name)
return self
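# Minimal usage sketch (not part of the original module). It assumes a
# FITS image exists at the placeholder path "example.fits", that astropy
# and the other fil_finder dependencies are installed, and that the
# parameter values below are purely illustrative.
if __name__ == "__main__":
    from astropy.io.fits import getdata
    img, hdr = getdata("example.fits", header=True)  # placeholder file
    filfind = fil_finder_2D(img, hdr, 15.1, distance=170.,
                            glob_thresh=20, save_name="example_run")
    filfind.run(verbose=False, table_type="csv")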
|
mit
|
timestocome/Test-stock-prediction-algorithms
|
Misc experiments/PortfolioOptimizationOfIndexFunds.py
|
1
|
6507
|
# http://github.com/timestocome
# Chpt 11-Portfolio Optimization in Python for Finance, O'Reilly
# Not sure what to say about this stuff. It's a good review of how
# things used to be done and from what I've seen is still in use by
# some mutual funds, which is why you should buy index funds or do your
# own investing.
import pandas as pd
import numpy as np
import numpy.random as npr
import scipy.stats as scs
import scipy.optimize as sco
import statsmodels.api as sm
import matplotlib.pyplot as plt
import matplotlib as matplotlib
# read in file
def read_data(file_name):
stock = pd.read_csv(file_name, parse_dates=True, index_col=0) # 31747 days of data
n_samples = len(stock)
# ditch samples with NAN values
stock = stock.dropna(axis=0)
# flip order from newest to oldest to oldest to newest
stock = stock.iloc[::-1]
# trim data
stock = stock[['Open']]
# trim dates
stock = stock.loc[stock.index > '01-01-1990']
stock = stock.loc[stock.index < '12-31-2016']
# all stock is needed to walk back dates for testing hold out data
return stock
#############################################################################################
# load and combine stock indexes
dow_jones = read_data('data/djia.csv')
print("Loaded DJIA", len(dow_jones))
s_p = read_data('data/S&P.csv')
print("Loaded S&P", len(s_p))
russell_2000 = read_data('data/Russell2000.csv')
print("Loaded Russell", len(russell_2000))
nasdaq = read_data('data/nasdaq.csv')
print("Loaded NASDAQ", len(nasdaq))
# combine stock indexes into one dataframe
data = pd.concat([dow_jones['Open'], s_p['Open'], russell_2000['Open'], nasdaq['Open']], axis=1, keys=['dow_jones', 'S&P', 'russell_2000', 'nasdaq'])
# compare indexes
(data / data.iloc[0] * 100).plot(figsize=(12,12))
plt.title("Standarized Indexes 1990-2016")
plt.show()
########################################################################################
# log returns
# continuously compounded rate e^(rt); taking logs normalizes the returns
# shows differences in returns on the 4 indexes
def print_statistics(array):
sta = scs.describe(array)
print("%14s %15s" % ('statistic', 'value'))
print(30 * '-')
print("%14s %15.5f" % ('size', sta[0]))
print("%14s %15.5f" % ('min', sta[1][0]))
print("%14s %15.5f" % ('max', sta[1][1]))
print("%14s %15.5f" % ('mean', sta[2] ))
print("%14s %15.5f" % ('std', np.sqrt(sta[3])))
print("%14s %15.5f" % ('skew', sta[4]))
print("%14s %15.5f" % ('kutosis', sta[5]))
log_returns = np.log(data / data.shift(1))
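# Log (continuously compounded) returns: ln(P_t / P_{t-1}). For example, a
# move from 100 to 105 is a log return of ln(1.05) ~= 0.0488, slightly below
# the simple 5% return; daily log returns add over time, which is why the
# daily mean is annualized below by multiplying by 252.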
# compare indexes
annualized_returns = log_returns.mean() * 252
print("Annualized returns 1990-2016")
print(annualized_returns)
# plot
log_returns.hist(bins=50, figsize=(12,12))
plt.suptitle("Histogram of log returns")
plt.show()
# print to screen for user
stocks = data.columns.values
for stock in stocks:
print(30 * '-')
print ("\n Results for index %s" % stock)
log_data = np.array(log_returns[stock].dropna())
print_statistics(log_data)
###############################################################################
# Quantile-quantile - calculate and plot
# shows fat tail distribution in all 4 of these indexes
# note curves on both ends
sm.qqplot(log_returns['dow_jones'].dropna(), line='s')
plt.grid(True)
plt.xlabel('theoretical quantiles')
plt.ylabel('sample quantiles')
plt.title('Quantiles Dow Jones')
plt.show()
sm.qqplot(log_returns['S&P'].dropna(), line='s')
plt.grid(True)
plt.xlabel('theoretical quantiles')
plt.ylabel('sample quantiles')
plt.title('Quantiles S&P')
plt.show()
sm.qqplot(log_returns['russell_2000'].dropna(), line='s')
plt.grid(True)
plt.xlabel('theoretical quantiles')
plt.ylabel('sample quantiles')
plt.title('Quantiles Russell 2000')
plt.show()
sm.qqplot(log_returns['nasdaq'].dropna(), line='s')
plt.grid(True)
plt.xlabel('theoretical quantiles')
plt.ylabel('sample quantiles')
plt.title('Quantiles Nasdaq')
plt.show()
#########################################################################################
# portfolio balancing 1-1-1990, 12-31-2016
# 252 is the estimated trading days in one year
# try equal investments
covariance_indexes = log_returns.cov() * 252
print(30 * '-')
print("Covariance matrix for indexes")
print(covariance_indexes)
print(30 * '-')
# invest 25% of your money in each of the 4 indexes
weights = np.asarray([.25, .25, .25, .25])
portfolio_return = np.sum(log_returns.mean() * weights) * 252
print(30 * '-')
print("return on equal weight portfolio", portfolio_return)
portfolio_variance = np.dot(weights.T, np.dot(log_returns.cov() * 252, weights))
print("expected portfolio variance on returns", portfolio_variance)
print("expected returns %.2lf to %.2lf" %(portfolio_return - portfolio_variance, portfolio_return + portfolio_variance))
print(30 * '-')
##############################################################################
# use optimization to get best weighting for portfolio
# weights must add up to 1 ( 100% of money invested)
noa = len(stocks)
constraints = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
bounds = tuple((0, 1) for x in range(noa))
def statistics(weights):
weights = np.array(weights)
p_return = np.sum(log_returns.mean() * weights) * 252
p_volatility = np.sqrt(np.dot(weights.T, np.dot(log_returns.cov() * 252, weights)))
return (np.array([p_return, p_volatility, p_return/p_volatility]))
# maximize mean of returns
# http://www.investopedia.com/terms/s/sharperatio.asp
# Sharpe ratio === (expected_return - current_savings_rate ) / (std_of_portfolio)
def min_func_sharpe(weights):
return -statistics(weights)[2]
optimal_values = sco.minimize(min_func_sharpe, noa * [1./noa,], method='SLSQP', bounds=bounds, constraints=constraints)
print("Optimal value portfolio")
print(optimal_values)
print("Optimal weights for maximum return", optimal_values['x'].round(3))
print("Expected return, volatility, Sharpe ratio", statistics(optimal_values['x'].round(3)))
print(30 * '-')
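# Illustrative comparison (not in the original script): re-evaluate the
# naive equal-weight portfolio with the same statistics() helper so the
# optimized weights above can be judged against it.
equal_weights = np.asarray([0.25, 0.25, 0.25, 0.25])
print("Equal-weight return, volatility, Sharpe ratio",
      statistics(equal_weights))
print(30 * '-')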
# minimize variance
def min_func_variance(weights):
return statistics(weights)[1] **2
optimal_variance = sco.minimize(min_func_variance, noa * [1. / noa,], method='SLSQP', bounds=bounds, constraints=constraints)
print("Optimal variance portfolio")
print(optimal_variance)
print("Optimal weights for low variance", optimal_variance['x'].round(3))
print("Expected return, volatility, Sharpe ratio", statistics(optimal_variance['x'].round(3)))
print(30 * '-')
|
mit
|
pomack/pychecker
|
pychecker/pcmodules.py
|
1
|
22174
|
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
"""
Track loaded PyCheckerModules together with the directory they were loaded from.
This allows us to differentiate between loaded modules with the same name
but from different paths, in a way that sys.modules doesn't do.
"""
import re
import sys
import imp
import types
import string
from pychecker import utils, function, Config, OP
# Constants
_DEFAULT_MODULE_TOKENS = ('__builtins__', '__doc__', '__file__', '__name__',
'__path__')
_DEFAULT_CLASS_TOKENS = ('__doc__', '__name__', '__module__')
# When using introspection on objects from some C extension modules,
# the interpreter will crash. Since pychecker exercises these bugs we
# need to blacklist the objects and ignore them. For more info on how
# to determine what object is causing the crash, search for this
# comment below (ie, it is also several hundred lines down):
#
# README if interpreter is crashing:
# FIXME: the values should indicate the versions of these modules
# that are broken. We shouldn't ignore good modules.
EVIL_C_OBJECTS = {
'matplotlib.axes.BinOpType': None, # broken on versions <= 0.83.2
# broken on versions at least 2.5.5 up to 2.6
'wx.TheClipboard': None,
'wx._core.TheClipboard': None,
'wx._misc.TheClipboard': None,
}
__pcmodules = {}
def _filterDir(object, ignoreList):
"""
Return a list of attribute names of an object, excluding the ones
in ignoreList.
@type ignoreList: list of str
@rtype: list of str
"""
tokens = dir(object)
for token in ignoreList:
if token in tokens:
tokens.remove(token)
return tokens
def _getClassTokens(c):
return _filterDir(c, _DEFAULT_CLASS_TOKENS)
def _getPyFile(filename):
"""Return the file and '.py' filename from a filename which could
end with .py, .pyc, or .pyo"""
if filename[-1] in 'oc' and filename[-4:-1] == '.py':
return filename[:-1]
return filename
def _getModuleTokens(m):
return _filterDir(m, _DEFAULT_MODULE_TOKENS)
class Variable:
"Class to hold all information about a variable"
def __init__(self, name, type):
"""
@param name: name of the variable
@type name: str
@param type: type of the variable
@type type: type
"""
self.name = name
self.type = type
self.value = None
def __str__(self) :
return self.name
__repr__ = utils.std_repr
class Class:
"""
Class to hold all information about a class.
@ivar name: name of class
@type name: str
@ivar classObject: the object representing the class
@type classObject: class
@ivar module: the module where the class is defined
@type module: module
@ivar ignoreAttrs: whether to ignore this class's attributes when checking
attributes. Can be set because of a bad __getattr__
or because the module this class comes from is
blacklisted.
@type ignoreAttrs: int (used as bool)
@type methods: dict
@type members: dict of str -> type
@type memberRefs: dict
@type statics: dict
@type lineNums: dict
"""
def __init__(self, name, pcmodule):
"""
@type name: str
@type pcmodule: L{PyCheckerModule}
"""
self.name = name
module = pcmodule.module
self.classObject = getattr(module, name)
modname = getattr(self.classObject, '__module__', None)
if modname is None:
# hm, some ExtensionClasses don't have a __module__ attribute
# so try parsing the type output
typerepr = repr(type(self.classObject))
mo = re.match("^<type ['\"](.+)['\"]>$", typerepr)
if mo:
modname = ".".join(mo.group(1).split(".")[:-1])
# TODO(nnorwitz): this check for __name__ might not be necessary
# any more. Previously we checked objects as if they were classes.
# This problem is fixed by not adding objects as if they are classes.
# zope.interface for example has Provides and Declaration that
# look a lot like class objects but do not have __name__
if not hasattr(self.classObject, '__name__'):
if modname not in utils.cfg().blacklist:
sys.stderr.write("warning: no __name__ attribute "
"for class %s (module name: %s)\n"
% (self.classObject, modname))
self.classObject.__name__ = name
# later pychecker code uses this
self.classObject__name__ = self.classObject.__name__
self.module = sys.modules.get(modname)
# if the pcmodule has moduleDir, it means we processed it before,
# and deleted it from sys.modules
if not self.module and pcmodule.moduleDir is None:
self.module = module
if modname not in utils.cfg().blacklist:
sys.stderr.write("warning: couldn't find real module "
"for class %s (module name: %s)\n"
% (self.classObject, modname))
self.ignoreAttrs = 0
self.methods = {}
self.members = { '__class__': types.ClassType,
'__doc__': types.StringType,
'__dict__': types.DictType, }
self.memberRefs = {}
self.statics = {}
self.lineNums = {}
def __str__(self) :
return self.name
__repr__ = utils.std_repr
def getFirstLine(self) :
"Return first line we can find in THIS class, not any base classes"
lineNums = []
classDir = dir(self.classObject)
for m in self.methods.values() :
if m != None and m.function.func_code.co_name in classDir:
lineNums.append(m.function.func_code.co_firstlineno)
if lineNums :
return min(lineNums)
return 0
def allBaseClasses(self, c = None) :
"Return a list of all base classes for this class and its subclasses"
baseClasses = []
if c == None :
c = self.classObject
for base in getattr(c, '__bases__', None) or ():
baseClasses = baseClasses + [ base ] + self.allBaseClasses(base)
return baseClasses
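# __getMethodName reproduces Python's private-name mangling: a method
# named __foo (no trailing double underscore) is looked up as
# _ClassName__foo.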
def __getMethodName(self, func_name, className = None) :
if func_name[0:2] == '__' and func_name[-2:] != '__' :
if className == None :
className = self.name
if className[0] != '_' :
className = '_' + className
func_name = className + func_name
return func_name
def addMethod(self, methodName, method=None):
"""
Add the given method to this class by name.
@type methodName: str
@type method: method or None
"""
if not method:
self.methods[methodName] = None
else :
self.methods[methodName] = function.Function(method, 1)
def addMethods(self, classObject):
"""
Add all methods for this class object to the class.
@param classObject: the class object to add methods from.
@type classObject: types.ClassType (classobj)
"""
for classToken in _getClassTokens(classObject):
token = getattr(classObject, classToken, None)
if token is None:
continue
# Looks like a method. Need to code it this way to
# accommodate ExtensionClass and Python 2.2. Yecchh.
if (hasattr(token, "func_code") and
hasattr(token.func_code, "co_argcount")):
self.addMethod(token.__name__, method=token)
elif hasattr(token, '__get__') and \
not hasattr(token, '__set__') and \
type(token) is not types.ClassType:
self.addMethod(getattr(token, '__name__', classToken))
else:
self.members[classToken] = type(token)
self.memberRefs[classToken] = None
self.cleanupMemberRefs()
# add standard methods
for methodName in ('__class__', ):
self.addMethod(methodName)
def addMembers(self, classObject) :
if not utils.cfg().onlyCheckInitForMembers :
for classToken in _getClassTokens(classObject) :
method = getattr(classObject, classToken, None)
if type(method) == types.MethodType :
self.addMembersFromMethod(method.im_func)
else:
try:
self.addMembersFromMethod(classObject.__init__.im_func)
except AttributeError:
pass
def addMembersFromMethod(self, method) :
if not hasattr(method, 'func_code') :
return
func_code, code, i, maxCode, extended_arg = OP.initFuncCode(method)
stack = []
while i < maxCode :
op, oparg, i, extended_arg = OP.getInfo(code, i, extended_arg)
if op >= OP.HAVE_ARGUMENT :
operand = OP.getOperand(op, func_code, oparg)
if OP.LOAD_CONST(op) or OP.LOAD_FAST(op) or OP.LOAD_GLOBAL(op):
stack.append(operand)
elif OP.LOAD_DEREF(op):
try:
operand = func_code.co_cellvars[oparg]
except IndexError:
index = oparg - len(func_code.co_cellvars)
operand = func_code.co_freevars[index]
stack.append(operand)
elif OP.STORE_ATTR(op) :
if len(stack) > 0 :
if stack[-1] == utils.cfg().methodArgName:
value = None
if len(stack) > 1 :
value = type(stack[-2])
self.members[operand] = value
self.memberRefs[operand] = None
stack = []
self.cleanupMemberRefs()
def cleanupMemberRefs(self) :
try :
del self.memberRefs[Config.CHECKER_VAR]
except KeyError :
pass
def abstractMethod(self, m):
"""Return 1 if method is abstract, None if not
An abstract method always raises an exception.
"""
if not self.methods.get(m, None):
return None
funcCode, codeBytes, i, maxCode, extended_arg = \
OP.initFuncCode(self.methods[m].function)
# abstract if the first opcode is RAISE_VARARGS and it raises
# NotImplementedError
arg = ""
while i < maxCode:
op, oparg, i, extended_arg = OP.getInfo(codeBytes, i, extended_arg)
if OP.LOAD_GLOBAL(op):
arg = funcCode.co_names[oparg]
elif OP.RAISE_VARARGS(op):
# if we saw NotImplementedError sometime before the raise
# assume it's related to this raise stmt
return arg == "NotImplementedError"
if OP.conditional(op):
break
return None
def isAbstract(self):
"""Return the method names that make a class abstract.
An abstract class has at least one abstract method."""
result = []
for m in self.methods.keys():
if self.abstractMethod(m):
result.append(m)
return result
class PyCheckerModule:
"""
Class to hold all information for a module
@ivar module: the module wrapped by this PyCheckerModule
@type module: module
@ivar moduleName: name of the module
@type moduleName: str
@ivar moduleDir: if specified, the directory where the module can
be loaded from; allows discerning between modules
with the same name in a different directory.
Note that moduleDir can be the empty string, if
the module being tested lives in the current working
directory.
@type moduleDir: str
@ivar variables: dict of variable name -> Variable
@type variables: dict of str -> L{Variable}
@ivar functions: dict of function name -> function
@type functions: dict of str -> L{function.Function}
@ivar classes: dict of class name -> class
@type classes: dict of str -> L{Class}
@ivar modules: dict of module name -> module
@type modules: dict of str -> L{PyCheckerModule}
@ivar moduleLineNums: mapping of the module's names/operands to the
filename and line number where they are created
@type moduleLineNums: dict of str -> (str, int)
@ivar mainCode: the module's main (module-level) code
@type mainCode: L{function.Function}
@ivar check: whether this module should be checked
@type check: int (used as bool)
"""
def __init__(self, moduleName, check=1, moduleDir=None):
"""
@param moduleName: name of the module
@type moduleName: str
@param check: whether this module should be checked
@type check: int (used as bool)
@param moduleDir: if specified, the directory where the module can
be loaded from; allows discerning between modules
with the same name in a different directory.
Note that moduleDir can be the empty string, if
the module being tested lives in the current working
directory.
@type moduleDir: str
"""
self.module = None
self.moduleName = moduleName
self.moduleDir = moduleDir
self.variables = {}
self.functions = {}
self.classes = {}
self.modules = {}
self.moduleLineNums = {}
self.attributes = [ '__dict__' ]
self.mainCode = None
self.check = check
# key on a combination of moduleName and moduleDir so we have separate
# entries for modules with the same name but in different directories
addPCModule(self)
def __str__(self):
return self.moduleName
__repr__ = utils.std_repr
def addVariable(self, var, varType):
"""
@param var: name of the variable
@type var: str
@param varType: type of the variable
@type varType: type
"""
self.variables[var] = Variable(var, varType)
def addFunction(self, func):
"""
@type func: callable
"""
self.functions[func.__name__] = function.Function(func)
def __addAttributes(self, c, classObject) :
for base in getattr(classObject, '__bases__', None) or ():
self.__addAttributes(c, base)
c.addMethods(classObject)
c.addMembers(classObject)
def addClass(self, name):
self.classes[name] = c = Class(name, self)
try:
objName = utils.safestr(c.classObject)
except TypeError:
# this can happen if there is a goofy __getattr__
c.ignoreAttrs = 1
else:
packages = string.split(objName, '.')
c.ignoreAttrs = packages[0] in utils.cfg().blacklist
if not c.ignoreAttrs :
self.__addAttributes(c, c.classObject)
def addModule(self, name, moduleDir=None) :
module = getPCModule(name, moduleDir)
if module is None :
self.modules[name] = module = PyCheckerModule(name, 0)
if imp.is_builtin(name) == 0:
module.load()
else :
globalModule = globals().get(name)
if globalModule :
module.attributes.extend(dir(globalModule))
else :
self.modules[name] = module
def filename(self) :
try :
filename = self.module.__file__
except AttributeError :
filename = self.moduleName
# FIXME: we're blindly adding .py, but it might be something else.
if self.moduleDir:
filename = self.moduleDir + '/' + filename + '.py'
return _getPyFile(filename)
def load(self):
try :
# there's no need to reload modules we already have if no moduleDir
# is specified for this module
# NOTE: self.moduleDir can be '' if the module tested lives in
# the current working directory
if self.moduleDir is None:
module = sys.modules.get(self.moduleName)
if module:
pcmodule = getPCModule(self.moduleName)
if not pcmodule.module:
return self._initModule(module)
return 1
return self._initModule(self.setupMainCode())
except (SystemExit, KeyboardInterrupt):
exc_type, exc_value, exc_tb = sys.exc_info()
raise exc_type, exc_value
except:
utils.importError(self.moduleName, self.moduleDir)
return utils.cfg().ignoreImportErrors
def initModule(self, module) :
if not self.module:
filename = _getPyFile(module.__file__)
if string.lower(filename[-3:]) == '.py':
try:
handle = open(filename)
except IOError:
pass
else:
self._setupMainCode(handle, filename, module)
return self._initModule(module)
return 1
def _initModule(self, module):
self.module = module
self.attributes = dir(self.module)
# interpret module-specific suppressions
pychecker_attr = getattr(module, Config.CHECKER_VAR, None)
if pychecker_attr is not None :
utils.pushConfig()
utils.updateCheckerArgs(pychecker_attr, 'suppressions', 0, [])
# read all tokens from the real module, and register them
for tokenName in _getModuleTokens(self.module):
if EVIL_C_OBJECTS.has_key('%s.%s' % (self.moduleName, tokenName)):
continue
# README if interpreter is crashing:
# Change 0 to 1 if the interpreter is crashing and re-run.
# Follow the instructions from the last line printed.
if utils.cfg().findEvil:
print "Add the following line to EVIL_C_OBJECTS or the string to evil in a config file:\n" \
" '%s.%s': None, " % (self.moduleName, tokenName)
token = getattr(self.module, tokenName)
if isinstance(token, types.ModuleType) :
# get the real module name, tokenName could be an alias
self.addModule(token.__name__)
elif isinstance(token, types.FunctionType) :
self.addFunction(token)
elif isinstance(token, types.ClassType) or \
hasattr(token, '__bases__') and \
issubclass(type(token), type):
self.addClass(tokenName)
else :
self.addVariable(tokenName, type(token))
if pychecker_attr is not None :
utils.popConfig()
return 1
def setupMainCode(self):
handle, filename, smt = utils.findModule(
self.moduleName, self.moduleDir)
# FIXME: if the smt[-1] == imp.PKG_DIRECTORY : load __all__
# HACK: to make sibling imports work, we add self.moduleDir to sys.path
# temporarily, and remove it later
if self.moduleDir is not None:
oldsyspath = sys.path[:]
sys.path.insert(0, self.moduleDir)
module = imp.load_module(self.moduleName, handle, filename, smt)
if self.moduleDir is not None:
sys.path = oldsyspath
# to make sure that subsequent modules with the same moduleName
# do not persist, and get their namespace clobbered, delete it
del sys.modules[self.moduleName]
self._setupMainCode(handle, filename, module)
return module
def _setupMainCode(self, handle, filename, module):
try:
self.mainCode = function.create_from_file(handle, filename, module)
finally:
if handle != None:
handle.close()
def getToken(self, name):
"""
Looks up the given name in this module's namespace.
@param name: the name of the token to look up in this module.
@rtype: one of L{Variable}, L{function.Function}, L{Class},
L{PyCheckerModule}, or None
"""
if name in self.variables:
return self.variables[name]
elif name in self.functions:
return self.functions[name]
elif name in self.classes:
return self.classes[name]
elif name in self.modules:
return self.modules[name]
return None
def getPCModule(moduleName, moduleDir=None):
"""
@type moduleName: str
@param moduleDir: if specified, the directory where the module can
be loaded from; allows discerning between modules
with the same name in a different directory.
Note that moduleDir can be the empty string, if
the module being tested lives in the current working
directory.
@type moduleDir: str
@rtype: L{pychecker.checker.PyCheckerModule}
"""
global __pcmodules
return __pcmodules.get((moduleName, moduleDir), None)
def getPCModules():
"""
@rtype: list of L{pychecker.checker.PyCheckerModule}
"""
global __pcmodules
return __pcmodules.values()
def addPCModule(pcmodule):
"""
@type pcmodule: L{pychecker.checker.PyCheckerModule}
"""
global __pcmodules
__pcmodules[(pcmodule.moduleName, pcmodule.moduleDir)] = pcmodule
|
bsd-3-clause
|
GitYiheng/reinforcement_learning_test
|
test03_monte_carlo/t45_vps01_030620032018.py
|
1
|
7599
|
import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
import gym_morph # customized environment for cart-pole
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
start_time = time.time()
max_test = 10
for test_num in range(1, max_test):
# Hyperparameters
RANDOM_NUMBER_SEED = test_num
ENVIRONMENT1 = "morph-v0"
MAX_EPISODES = 5000 # number of episodes
EPISODE_LENGTH = 200 # single episode length
HIDDEN_SIZE = 16
DISPLAY_WEIGHTS = False # Help debug weight update
gamma = 0.99 # Discount per step
RENDER = False # Render the cart-pole system
VIDEO_INTERVAL = 100 # Generate a video at this interval
CONSECUTIVE_TARGET = 100 # Including previous 100 rewards
CONST_LR = False # Constant or decaying learning rate
# Constant learning rate
const_learning_rate_in = 0.006
# Decay learning rate
start_learning_rate_in = 0.005
decay_steps_in = 100
decay_rate_in = 0.92
DIR_PATH_SAVEFIG = "/root/cartpole_plot/"
if CONST_LR:
learning_rate = const_learning_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_clr" + str(learning_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
else:
start_learning_rate = start_learning_rate_in
decay_steps = decay_steps_in
decay_rate = decay_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_dlr_slr" + str(start_learning_rate).replace(".", "p") \
+ "_ds" + str(decay_steps) \
+ "_dr" + str(decay_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
env = gym.make(ENVIRONMENT1)
env.seed(RANDOM_NUMBER_SEED)
np.random.seed(RANDOM_NUMBER_SEED)
tf.set_random_seed(RANDOM_NUMBER_SEED)
# Input and output sizes
input_size = 4
output_size = 2
# input_size = env.observation_space.shape[0]
# try:
# output_size = env.action_space.shape[0]
# except AttributeError:
# output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
if not CONST_LR:
# decay learning rate
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, decay_steps, decay_rate, staircase=False)
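# Note (added commentary, not in the original script): with staircase=False,
# tf.train.exponential_decay yields a smoothly decaying rate,
#     learning_rate = start_learning_rate * decay_rate ** (global_step / decay_steps)
# so with the values assumed above (0.005, 100 steps, 0.92) the rate falls to
# roughly 0.0046 after 100 training updates.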
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
# Xavier (2010) weights initializer for uniform distribution:
# x = sqrt(6. / (in + out)); [-x, x]
w_init = tf.contrib.layers.xavier_initializer()
hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
initializer=w_init)
hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
dist_soft = tf.nn.log_softmax(dist)
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
if CONST_LR:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
else:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi, global_step=global_step)
# saver = tf.train.Saver()
# Create and initialize a session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def run_episode(environment, ep, render=False):
raw_reward = 0
discounted_reward = 0
cumulative_reward = []
discount = 1.0
states = []
actions = []
obs = environment.reset()
done = False
while not done:
states.append(obs)
cumulative_reward.append(discounted_reward)
if render and ((ep % VIDEO_INTERVAL) == 0):
environment.render()
action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
actions.append(action)
obs, reward, done, info = env.step(action[0])
raw_reward += reward
if reward > 0:
discounted_reward += reward * discount
else:
discounted_reward += reward
discount *= gamma
return raw_reward, discounted_reward, cumulative_reward, states, actions
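# Sketch of how these returns are consumed (added commentary): for each
# visited state, cumulative_reward holds the discounted return accumulated
# *before* that step, so the training step below forms
#     expected_R[t] = discounted_G - cumulative_G[t]
# i.e. the reward-to-go from step t onward, which weights log pi(a_t|s_t) in
# the REINFORCE-style loss minimized by `train`.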
def display_weights(session):
w1 = session.run(hidden_W)
b1 = session.run(hidden_B)
w2 = session.run(dist_W)
b2 = session.run(dist_B)
print(w1, b1, w2, b2)
returns = []
mean_returns = []
for ep in range(MAX_EPISODES):
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
returns.append(raw_G)
running_returns = returns[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_return = np.mean(running_returns)
mean_returns.append(mean_return)
if CONST_LR:
msg = "Test: {}, Episode: {}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(test_num, ep+1, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), learning_rate, raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
else:
msg = "Test: {}, Episode: {}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(test_num, ep+1, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), sess.run(learning_rate), raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
env.close() # close openai gym environment
tf.reset_default_graph() # clear tensorflow graph
# Plot
# plt.style.use('ggplot')
plt.style.use('dark_background')
episodes_plot = np.arange(MAX_EPISODES)
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.85)
if CONST_LR:
ax.set_title("The Cart-Pole Problem Test %i \n \
Episode Length: %i \
Discount Factor: %.2f \n \
Number of Hidden Neuron: %i \
Constant Learning Rate: %.5f" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, learning_rate))
else:
ax.set_title("The Cart-Pole Problem Test %i \n \
EpisodeLength: %i DiscountFactor: %.2f NumHiddenNeuron: %i \n \
Decay Learning Rate: (start: %.5f, steps: %i, rate: %.2f)" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, start_learning_rate, decay_steps, decay_rate))
ax.set_xlabel("Episode")
ax.set_ylabel("Return")
ax.set_ylim((0, EPISODE_LENGTH))
ax.grid(linestyle='--')
ax.plot(episodes_plot, returns, label='Instant return')
ax.plot(episodes_plot, mean_returns, label='Averaged return')
legend = ax.legend(loc='best', shadow=True)
fig.savefig(DIR_PATH_SAVEFIG + file_name_savefig, dpi=500)
# plt.show()
|
mit
|
kiyoto/statsmodels
|
statsmodels/sandbox/examples/ex_mixed_lls_timecorr.py
|
34
|
7824
|
# -*- coding: utf-8 -*-
"""Example using OneWayMixed with within group intertemporal correlation
Created on Sat Dec 03 10:15:55 2011
Author: Josef Perktold
This example constructs a linear model with individual specific random
effects, and uses OneWayMixed to estimate it.
This is a variation on ex_mixed_lls_0.py.
Here we use time dummies as random effects (all except 1st time period).
I think this should allow for (almost) arbitrary intertemporal correlation.
The assumption is that each unit can have different constants; however, the
intertemporal covariance matrix is the same for all units. One caveat: to
avoid singular matrices, we have to treat one time period differently.
Estimation requires that the number of units is larger than the number of
time periods. Also, it requires that we have the same number of periods for
each unit.
I needed to remove the first observation from the time dummies to avoid a
singular matrix. So, interpretation of time effects should be relative to
first observation. (I didn't check the math.)
TODO:
Note: I don't have a constant in X yet. The constant for the first
time observation is missing.
Do I need all dummies in exog_fe (X), but not in exog_re (Z)? Tried this and
it works.
In the error decomposition we also have the noise variable, I guess this works
like constant, so we get full rank (square) with only T-1 time dummies.
But we don't get correlation with the noise, or do we? conditional?
-> sample correlation of estimated random effects looks a bit high,
upward bias? or still some problems with initial condition?
correlation from estimated cov_random looks good.
Since we include the time dummies also in the fixed effect, we can have
arbitrary trends, different constants in each period.
Intertemporal correlation is built into the data generating process, DGP, to
see if the results correctly estimate it.
used AR(1) as example, but only starting at second period. (?)
Note: we don't impose AR structure in the estimation
"""
import numpy as np
from statsmodels.sandbox.panel.mixed import OneWayMixed, Unit
examples = ['ex1']
if 'ex1' in examples:
#np.random.seed(54321)
#np.random.seed(978326)
nsubj = 200
units = []
nobs_i = 8 #number of observations per unit, changed below
nx = 1 #number fixed effects
nz = nobs_i - 1 ##number random effects
beta = np.ones(nx)
gamma = 0.5 * np.ones(nz) #mean of random effect
#gamma[0] = 0
gamma_re_true = []
for i in range(nsubj):
#create data for one unit
#random effect/coefficient
use_correlated = True
if not use_correlated:
gamma_re = gamma + 0.2 * np.random.standard_normal(nz)
else:
#coefficients are AR(1) for all but first time periods
from scipy import linalg as splinalg
rho = 0.6
corr_re = splinalg.toeplitz(rho**np.arange(nz))
rvs = np.random.multivariate_normal(np.zeros(nz), corr_re)
gamma_re = gamma + 0.2 * rvs
#store true parameter for checking
gamma_re_true.append(gamma_re)
#generate exogenous variables
X = np.random.standard_normal((nobs_i, nx))
#try Z should be time dummies
time_dummies = (np.arange(nobs_i)[:, None] == np.arange(nobs_i)[None, :]).astype(float)
Z = time_dummies[:,1:]
# Z = np.random.standard_normal((nobs_i, nz-1))
# Z = np.column_stack((np.ones(nobs_i), Z))
noise = 0.1 * np.random.randn(nobs_i) #sig_e = 0.1
#generate endogenous variable
Y = np.dot(X, beta) + np.dot(Z, gamma_re) + noise
#add random effect design matrix also to fixed effects to
#capture the mean
#this seems to be necessary to force mean of RE to zero !?
#(It's not required for estimation, but the interpretation of the random
#effects covariance matrix changes - still need to check details.)
#X = np.hstack((X,Z))
X = np.hstack((X, time_dummies))
#create units and append to list
unit = Unit(Y, X, Z)
units.append(unit)
m = OneWayMixed(units)
import time
t0 = time.time()
m.initialize()
res = m.fit(maxiter=100, rtol=1.0e-5, params_rtol=1e-6, params_atol=1e-6)
t1 = time.time()
print('time for initialize and fit', t1-t0)
print('number of iterations', m.iterations)
#print dir(m)
#print vars(m)
print('\nestimates for fixed effects')
print(m.a)
print(m.params)
bfixed_cov = m.cov_fixed()
print('beta fixed standard errors')
print(np.sqrt(np.diag(bfixed_cov)))
print(m.bse)
b_re = m.params_random_units
print('RE mean:', b_re.mean(0))
print('RE columns std', b_re.std(0))
print('np.cov(b_re, rowvar=0), sample statistic')
print(np.cov(b_re, rowvar=0))
print('sample correlation of estimated random effects')
print(np.corrcoef(b_re, rowvar=0))
print('std of above')
#need atleast_1d or diag raises exception
print(np.sqrt(np.diag(np.atleast_1d(np.cov(b_re, rowvar=0)))))
print('m.cov_random()')
print(m.cov_random())
print('correlation from above')
print(res.cov_random()/ res.std_random()[:,None] /res.std_random())
print('std of above')
print(res.std_random())
print(np.sqrt(np.diag(m.cov_random())))
print('\n(non)convergence of llf')
print(m.history['llf'][-4:])
print('convergence of parameters')
#print np.diff(np.vstack(m.history[-4:])[:,1:],axis=0)
print(np.diff(np.vstack(m.history['params'][-4:]),axis=0))
print('convergence of D')
print(np.diff(np.array(m.history['D'][-4:]), axis=0))
#zdotb = np.array([np.dot(unit.Z, unit.b) for unit in m.units])
zb = np.array([(unit.Z * unit.b[None,:]).sum(0) for unit in m.units])
'''if Z is not included in X:
>>> np.dot(b_re.T, b_re)/100
array([[ 0.03270611, -0.00916051],
[-0.00916051, 0.26432783]])
>>> m.cov_random()
array([[ 0.0348722 , -0.00909159],
[-0.00909159, 0.26846254]])
>>> #note cov_random doesn't subtract mean!
'''
print('\nchecking the random effects distribution and prediction')
gamma_re_true = np.array(gamma_re_true)
print('mean of random effect true', gamma_re_true.mean(0))
print('mean from fixed effects ', m.params[-2:])
print('mean of estimated RE ', b_re.mean(0))
print()
absmean_true = np.abs(gamma_re_true).mean(0)
mape = ((m.params[-nz:] + b_re) / gamma_re_true - 1).mean(0)*100
mean_abs_perc = np.abs((m.params[-nz:] + b_re) - gamma_re_true).mean(0) \
/ absmean_true*100
median_abs_perc = np.median(np.abs((m.params[-nz:] + b_re) - gamma_re_true), 0) \
/ absmean_true*100
rmse_perc = ((m.params[-nz:] + b_re) - gamma_re_true).std(0) \
/ absmean_true*100
print('mape ', mape)
print('mean_abs_perc ', mean_abs_perc)
print('median_abs_perc', median_abs_perc)
print('rmse_perc (std)', rmse_perc)
from numpy.testing import assert_almost_equal
#assert is for n_units=100 in original example
#I changed random number generation, so this won't work anymore
#assert_almost_equal(rmse_perc, [ 34.14783884, 11.6031684 ], decimal=8)
#now returns res
print('llf', res.llf) #based on MLE, does not include constant
print('tvalues', res.tvalues)
print('pvalues', res.pvalues)
rmat = np.zeros(len(res.params))
rmat[-nz:] = 1
print('t_test mean of random effects variables are zero')
print(res.t_test(rmat))
print('f_test mean of both random effects variables is zero (joint hypothesis)')
print(res.f_test(rmat))
plots = res.plot_random_univariate() #(bins=50)
fig = res.plot_scatter_all_pairs()
import matplotlib.pyplot as plt
plt.show()
|
bsd-3-clause
|
gregcaporaso/scikit-bio
|
skbio/stats/composition.py
|
4
|
43580
|
r"""
Composition Statistics (:mod:`skbio.stats.composition`)
=======================================================
.. currentmodule:: skbio.stats.composition
This module provides functions for compositional data analysis.
Many 'omics datasets are inherently compositional - meaning that they
are best interpreted as proportions or percentages rather than
absolute counts.
Formally, :math:`x` is a composition if :math:`\sum_{i=0}^D x_{i} = c`
and :math:`x_{i} > 0`, :math:`1 \leq i \leq D` and :math:`c` is a real
valued constant and there are :math:`D` components for each
composition. In this module :math:`c=1`. Compositional data can be
analyzed using Aitchison geometry. [1]_
However, in this framework, standard real Euclidean operations such as
addition and multiplication no longer apply. Only operations such as
perturbation and power can be used to manipulate this data.
This module allows two styles of manipulation of compositional data.
Compositional data can be analyzed using perturbation and power
operations, which can be useful for simulation studies. The
alternative strategy is to transform compositional data into the real
space. Right now, the centre log ratio transform (clr) and
the isometric log ratio transform (ilr) [2]_ can be used to accomplish
this. This transform can be useful for performing standard statistical
tools such as parametric hypothesis testing, regressions and more.
The major caveat of using this framework is dealing with zeros. In
the Aitchison geometry, only compositions with nonzero components can
be considered. The multiplicative replacement technique [3]_ can be
used to substitute these zeros with small pseudocounts without
introducing major distortions to the data.
Functions
---------
.. autosummary::
:toctree:
closure
multiplicative_replacement
perturb
perturb_inv
power
inner
clr
clr_inv
ilr
ilr_inv
alr
alr_inv
centralize
ancom
sbp_basis
References
----------
.. [1] V. Pawlowsky-Glahn, J. J. Egozcue, R. Tolosana-Delgado (2015),
Modeling and Analysis of Compositional Data, Wiley, Chichester, UK
.. [2] J. J. Egozcue., "Isometric Logratio Transformations for
Compositional Data Analysis" Mathematical Geology, 35.3 (2003)
.. [3] J. A. Martin-Fernandez, "Dealing With Zeros and Missing Values in
Compositional Data Sets Using Nonparametric Imputation",
Mathematical Geology, 35.3 (2003)
Examples
--------
>>> import numpy as np
Consider a very simple environment with only 3 species. The species
in the environment are equally distributed and their proportions are
equivalent:
>>> otus = np.array([1./3, 1./3., 1./3])
Suppose that an antibiotic kills off half of the population for the
first two species, but doesn't harm the third species. Then the
perturbation vector would be as follows
>>> antibiotic = np.array([1./2, 1./2, 1])
And the resulting perturbation would be
>>> perturb(otus, antibiotic)
array([ 0.25, 0.25, 0.5 ])
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import pandas as pd
import scipy.stats
import skbio.util
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def closure(mat):
"""
Performs closure to ensure that all elements add up to 1.
Parameters
----------
mat : array_like
a matrix of proportions where
rows = compositions
columns = components
Returns
-------
array_like, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Raises
------
ValueError
Raises an error if any values are negative.
ValueError
Raises an error if the matrix has more than 2 dimensions.
ValueError
Raises an error if there is a row that has all zeros.
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import closure
>>> X = np.array([[2, 2, 6], [4, 4, 2]])
>>> closure(X)
array([[ 0.2, 0.2, 0.6],
[ 0.4, 0.4, 0.2]])
"""
mat = np.atleast_2d(mat)
if np.any(mat < 0):
raise ValueError("Cannot have negative proportions")
if mat.ndim > 2:
raise ValueError("Input matrix can only have two dimensions or less")
if np.all(mat == 0, axis=1).sum() > 0:
raise ValueError("Input matrix cannot have rows with all zeros")
mat = mat / mat.sum(axis=1, keepdims=True)
return mat.squeeze()
@experimental(as_of="0.4.0")
def multiplicative_replacement(mat, delta=None):
r"""Replace all zeros with small non-zero values
It uses the multiplicative replacement strategy [1]_ ,
replacing zeros with a small positive :math:`\delta`
and ensuring that the compositions still add up to 1.
Parameters
----------
mat: array_like
a matrix of proportions where
rows = compositions and
columns = components
delta: float, optional
a small number to be used to replace zeros
If delta is not specified, then the default delta is
:math:`\delta = \frac{1}{N^2}` where :math:`N`
is the number of components
Returns
-------
numpy.ndarray, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Raises
------
ValueError
Raises an error if negative proportions are created due to a large
`delta`.
Notes
-----
This method will result in negative proportions if a large delta is chosen.
References
----------
.. [1] J. A. Martin-Fernandez. "Dealing With Zeros and Missing Values in
Compositional Data Sets Using Nonparametric Imputation"
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import multiplicative_replacement
>>> X = np.array([[.2,.4,.4, 0],[0,.5,.5,0]])
>>> multiplicative_replacement(X)
array([[ 0.1875, 0.375 , 0.375 , 0.0625],
[ 0.0625, 0.4375, 0.4375, 0.0625]])
"""
mat = closure(mat)
z_mat = (mat == 0)
num_feats = mat.shape[-1]
tot = z_mat.sum(axis=-1, keepdims=True)
if delta is None:
delta = (1. / num_feats)**2
zcnts = 1 - tot * delta
if np.any(zcnts < 0):
raise ValueError('The multiplicative replacement created negative '
'proportions. Consider using a smaller `delta`.')
mat = np.where(z_mat, delta, zcnts * mat)
return mat.squeeze()
@experimental(as_of="0.4.0")
def perturb(x, y):
r"""
Performs the perturbation operation.
This operation is defined as
.. math::
x \oplus y = C[x_1 y_1, \ldots, x_D y_D]
:math:`C[x]` is the closure operation defined as
.. math::
C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots,
\frac{x_D}{\sum_{i=1}^{D} x_i} \right]
for some :math:`D` dimensional real vector :math:`x` and
:math:`D` is the number of components for every composition.
Parameters
----------
x : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
y : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import perturb
>>> x = np.array([.1,.3,.4, .2])
>>> y = np.array([1./6,1./6,1./3,1./3])
>>> perturb(x,y)
array([ 0.0625, 0.1875, 0.5 , 0.25 ])
"""
x, y = closure(x), closure(y)
return closure(x * y)
@experimental(as_of="0.4.0")
def perturb_inv(x, y):
r"""
Performs the inverse perturbation operation.
This operation is defined as
.. math::
x \ominus y = C[x_1 y_1^{-1}, \ldots, x_D y_D^{-1}]
:math:`C[x]` is the closure operation defined as
.. math::
C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots,
\frac{x_D}{\sum_{i=1}^{D} x_i} \right]
for some :math:`D` dimensional real vector :math:`x` and
:math:`D` is the number of components for every composition.
Parameters
----------
x : array_like
a matrix of proportions where
rows = compositions and
columns = components
y : array_like
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import perturb_inv
>>> x = np.array([.1,.3,.4, .2])
>>> y = np.array([1./6,1./6,1./3,1./3])
>>> perturb_inv(x,y)
array([ 0.14285714, 0.42857143, 0.28571429, 0.14285714])
"""
x, y = closure(x), closure(y)
return closure(x / y)
@experimental(as_of="0.4.0")
def power(x, a):
r"""
Performs the power operation.
This operation is defined as follows
.. math::
x \odot a = C[x_1^a, \ldots, x_D^a]
:math:`C[x]` is the closure operation defined as
.. math::
C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots,
\frac{x_D}{\sum_{i=1}^{D} x_i} \right]
for some :math:`D` dimensional real vector :math:`x` and
:math:`D` is the number of components for every composition.
Parameters
----------
x : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
a : float
a scalar float
Returns
-------
numpy.ndarray, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import power
>>> x = np.array([.1,.3,.4, .2])
>>> power(x, .1)
array([ 0.23059566, 0.25737316, 0.26488486, 0.24714631])
"""
x = closure(x)
return closure(x**a).squeeze()
@experimental(as_of="0.4.0")
def inner(x, y):
r"""
Calculates the Aitchison inner product.
This inner product is defined as follows
.. math::
\langle x, y \rangle_a =
\frac{1}{2D} \sum\limits_{i=1}^{D} \sum\limits_{j=1}^{D}
\ln\left(\frac{x_i}{x_j}\right) \ln\left(\frac{y_i}{y_j}\right)
Parameters
----------
x : array_like
a matrix of proportions where
rows = compositions and
columns = components
y : array_like
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray
inner product result
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import inner
>>> x = np.array([.1, .3, .4, .2])
>>> y = np.array([.2, .4, .2, .2])
>>> inner(x, y) # doctest: +ELLIPSIS
0.2107852473...
"""
x = closure(x)
y = closure(y)
a, b = clr(x), clr(y)
return a.dot(b.T)
@experimental(as_of="0.4.0")
def clr(mat):
r"""
Performs centre log ratio transformation.
This function transforms compositions from Aitchison geometry to
the real space. The :math:`clr` transform is both an isometry and an
isomorphism defined on the following spaces
:math:`clr: S^D \rightarrow U`
where :math:`U=
\{x :\sum\limits_{i=1}^D x_i = 0 \; \forall x \in \mathbb{R}^D\}`
It is defined for a composition :math:`x` as follows:
.. math::
clr(x) = \ln\left[\frac{x_1}{g_m(x)}, \ldots, \frac{x_D}{g_m(x)}\right]
where :math:`g_m(x) = (\prod\limits_{i=1}^{D} x_i)^{1/D}` is the geometric
mean of :math:`x`.
Parameters
----------
mat : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray
clr transformed matrix
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import clr
>>> x = np.array([.1, .3, .4, .2])
>>> clr(x)
array([-0.79451346, 0.30409883, 0.5917809 , -0.10136628])
"""
mat = closure(mat)
lmat = np.log(mat)
gm = lmat.mean(axis=-1, keepdims=True)
return (lmat - gm).squeeze()
@experimental(as_of="0.4.0")
def clr_inv(mat):
r"""
Performs inverse centre log ratio transformation.
This function transforms compositions from the real space to
Aitchison geometry. The :math:`clr^{-1}` transform is both an isometry,
and an isomorphism defined on the following spaces
:math:`clr^{-1}: U \rightarrow S^D`
where :math:`U=
\{x :\sum\limits_{i=1}^D x_i = 0 \; \forall x \in \mathbb{R}^D\}`
This transformation is defined as follows
.. math::
clr^{-1}(x) = C[\exp( x_1, \ldots, x_D)]
Parameters
----------
mat : array_like, float
a matrix of real values where
rows = transformed compositions and
columns = components
Returns
-------
numpy.ndarray
inverse clr transformed matrix
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import clr_inv
>>> x = np.array([.1, .3, .4, .2])
>>> clr_inv(x)
array([ 0.21383822, 0.26118259, 0.28865141, 0.23632778])
"""
return closure(np.exp(mat))
@experimental(as_of="0.4.0")
def ilr(mat, basis=None, check=True):
r"""
Performs isometric log ratio transformation.
This function transforms compositions from the Aitchison simplex to
the real space. The :math:`ilr` transform is both an isometry,
and an isomorphism defined on the following spaces
:math:`ilr: S^D \rightarrow \mathbb{R}^{D-1}`
The ilr transformation is defined as follows
.. math::
ilr(x) =
[\langle x, e_1 \rangle_a, \ldots, \langle x, e_{D-1} \rangle_a]
where :math:`[e_1,\ldots,e_{D-1}]` is an orthonormal basis in the simplex.
If an orthonormal basis isn't specified, the J. J. Egozcue orthonormal
basis derived from Gram-Schmidt orthogonalization will be used by
default.
Parameters
----------
mat: numpy.ndarray
a matrix of proportions where
rows = compositions and
columns = components
basis: numpy.ndarray, float, optional
orthonormal basis for Aitchison simplex
defaults to J.J.Egozcue orthonormal basis.
check: bool
Specifies if the basis is orthonormal.
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import ilr
>>> x = np.array([.1, .3, .4, .2])
>>> ilr(x)
array([-0.7768362 , -0.68339802, 0.11704769])
Notes
-----
If the `basis` parameter is specified, it is expected to be a basis in the
Aitchison simplex. If there are `D-1` elements specified in `mat`, then
the dimensions of the basis need to be `D-1 x D`, where rows represent
basis vectors, and the columns represent proportions.
"""
mat = closure(mat)
if basis is None:
basis = clr_inv(_gram_schmidt_basis(mat.shape[-1]))
else:
if len(basis.shape) != 2:
raise ValueError("Basis needs to be a 2D matrix, "
"not a %dD matrix." %
(len(basis.shape)))
if check:
_check_orthogonality(basis)
return inner(mat, basis)
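# Usage sketch (added commentary, not part of skbio): a custom orthonormal
# basis can be passed instead of the default Gram-Schmidt one, for example
# one built from a sequential binary partition via sbp_basis(), defined later
# in this module:
#
#     sbp = np.array([[1,  1, -1, -1],
#                     [1, -1,  0,  0],
#                     [0,  0,  1, -1]])
#     coords = ilr(x, basis=sbp_basis(sbp))   # x has 4 parts -> 3 balances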
@experimental(as_of="0.4.0")
def ilr_inv(mat, basis=None, check=True):
r"""
Performs inverse isometric log ratio transform.
This function transforms compositions from the real space to
Aitchison geometry. The :math:`ilr^{-1}` transform is both an isometry,
and an isomorphism defined on the following spaces
:math:`ilr^{-1}: \mathbb{R}^{D-1} \rightarrow S^D`
The inverse ilr transformation is defined as follows
.. math::
ilr^{-1}(x) = \bigoplus\limits_{i=1}^{D-1} x \odot e_i
where :math:`[e_1,\ldots, e_{D-1}]` is an orthonormal basis in the simplex.
If an orthonormal basis isn't specified, the J. J. Egozcue orthonormal
basis derived from Gram-Schmidt orthogonalization will be used by
default.
Parameters
----------
mat: numpy.ndarray, float
a matrix of transformed proportions where
rows = compositions and
columns = components
basis: numpy.ndarray, float, optional
orthonormal basis for Aitchison simplex
defaults to J.J.Egozcue orthonormal basis
check: bool
Specifies if the basis is orthonormal.
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import ilr
>>> x = np.array([.1, .3, .6,])
>>> ilr_inv(x)
array([ 0.34180297, 0.29672718, 0.22054469, 0.14092516])
Notes
-----
If the `basis` parameter is specified, it is expected to be a basis in the
Aitchison simplex. If there are `D-1` elements specified in `mat`, then
the dimensions of the basis need to be `D-1 x D`, where rows represent
basis vectors, and the columns represent proportions.
"""
if basis is None:
basis = _gram_schmidt_basis(mat.shape[-1] + 1)
else:
if len(basis.shape) != 2:
raise ValueError("Basis needs to be a 2D matrix, "
"not a %dD matrix." %
(len(basis.shape)))
if check:
_check_orthogonality(basis)
# this is necessary, since the clr function
# performs np.squeeze()
basis = np.atleast_2d(clr(basis))
return clr_inv(np.dot(mat, basis))
@experimental(as_of="0.5.5")
def alr(mat, denominator_idx=0):
r"""
Performs additive log ratio transformation.
This function transforms compositions from a D-part Aitchison simplex to
a non-isometric real space of D-1 dimensions. The argument
`denominator_idx` defines the index of the column used as the common
denominator. The :math:`alr` transformed data are amenable to multivariate
analysis as long as statistics don't involve distances.
:math:`alr: S^D \rightarrow \mathbb{R}^{D-1}`
The alr transformation is defined as follows
.. math::
alr(x) = \left[ \ln \frac{x_1}{x_D}, \ldots,
\ln \frac{x_{D-1}}{x_D} \right]
where :math:`D` is the index of the part used as common denominator.
Parameters
----------
mat: numpy.ndarray
a matrix of proportions where
rows = compositions and
columns = components
denominator_idx: int
the index of the column (2D-matrix) or position (vector) of
`mat` which should be used as the reference composition. By default
`denominator_idx=0` to specify the first column or position.
Returns
-------
numpy.ndarray
alr-transformed data projected in a non-isometric real space
of D-1 dimensions for a D-parts composition
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import alr
>>> x = np.array([.1, .3, .4, .2])
>>> alr(x)
array([ 1.09861229, 1.38629436, 0.69314718])
"""
mat = closure(mat)
if mat.ndim == 2:
mat_t = mat.T
numerator_idx = list(range(0, mat_t.shape[0]))
del numerator_idx[denominator_idx]
lr = np.log(mat_t[numerator_idx, :]/mat_t[denominator_idx, :]).T
elif mat.ndim == 1:
numerator_idx = list(range(0, mat.shape[0]))
del numerator_idx[denominator_idx]
lr = np.log(mat[numerator_idx]/mat[denominator_idx])
else:
raise ValueError("mat must be either 1D or 2D")
return lr
@experimental(as_of="0.5.5")
def alr_inv(mat, denominator_idx=0):
r"""
Performs inverse additive log ratio transform.
This function transforms compositions from the non-isometric real space of
alrs to Aitchison geometry.
:math:`alr^{-1}: \mathbb{R}^{D-1} \rightarrow S^D`
The inverse alr transformation is defined as follows
.. math::
alr^{-1}(y) = C[\exp([y_1, y_2, \ldots, y_{D-1}, 0])]
where :math:`C[x]` is the closure operation defined as
.. math::
C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots,
\frac{x_D}{\sum_{i=1}^{D} x_i} \right]
for some :math:`D` dimensional real vector :math:`x` and
:math:`D` is the number of components for every composition.
Parameters
----------
mat: numpy.ndarray
a matrix of alr-transformed data
denominator_idx: int
the index of the column (2D-composition) or position (1D-composition) of
the output where the common denominator should be placed. By default
`denominator_idx=0` to specify the first column or position.
Returns
-------
numpy.ndarray
Inverse alr transformed matrix or vector where rows sum to 1.
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import alr, alr_inv
>>> x = np.array([.1, .3, .4, .2])
>>> alr_inv(alr(x))
array([ 0.1, 0.3, 0.4, 0.2])
"""
mat = np.array(mat)
if mat.ndim == 2:
mat_idx = np.insert(mat, denominator_idx,
np.repeat(0, mat.shape[0]), axis=1)
comp = np.zeros(mat_idx.shape)
comp[:, denominator_idx] = 1 / (np.exp(mat).sum(axis=1) + 1)
numerator_idx = list(range(0, comp.shape[1]))
del numerator_idx[denominator_idx]
for i in numerator_idx:
comp[:, i] = comp[:, denominator_idx] * np.exp(mat_idx[:, i])
elif mat.ndim == 1:
mat_idx = np.insert(mat, denominator_idx, 0, axis=0)
comp = np.zeros(mat_idx.shape)
comp[denominator_idx] = 1 / (np.exp(mat).sum(axis=0) + 1)
numerator_idx = list(range(0, comp.shape[0]))
del numerator_idx[denominator_idx]
for i in numerator_idx:
comp[i] = comp[denominator_idx] * np.exp(mat_idx[i])
else:
raise ValueError("mat must be either 1D or 2D")
return comp
@experimental(as_of="0.4.0")
def centralize(mat):
r"""Center data around its geometric average.
Parameters
----------
mat : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray
centered composition matrix
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import centralize
>>> X = np.array([[.1,.3,.4, .2],[.2,.2,.2,.4]])
>>> centralize(X)
array([[ 0.17445763, 0.30216948, 0.34891526, 0.17445763],
[ 0.32495488, 0.18761279, 0.16247744, 0.32495488]])
"""
mat = closure(mat)
cen = scipy.stats.gmean(mat, axis=0)
return perturb_inv(mat, cen)
@experimental(as_of="0.4.1")
def ancom(table, grouping,
alpha=0.05,
tau=0.02,
theta=0.1,
multiple_comparisons_correction='holm-bonferroni',
significance_test=None,
percentiles=(0.0, 25.0, 50.0, 75.0, 100.0)):
r""" Performs a differential abundance test using ANCOM.
This is done by calculating pairwise log ratios between all features
and performing a significance test to determine if there is a significant
difference in feature ratios with respect to the variable of interest.
In an experiment with only two treatments, this tests the following
hypothesis for feature :math:`i`
.. math::
H_{0i}: \mathbb{E}[\ln(u_i^{(1)})] = \mathbb{E}[\ln(u_i^{(2)})]
where :math:`u_i^{(1)}` is the mean abundance for feature :math:`i` in the
first group and :math:`u_i^{(2)}` is the mean abundance for feature
:math:`i` in the second group.
Parameters
----------
table : pd.DataFrame
A 2D matrix of strictly positive values (i.e. counts or proportions)
where the rows correspond to samples and the columns correspond to
features.
grouping : pd.Series
Vector indicating the assignment of samples to groups. For example,
these could be strings or integers denoting which group a sample
belongs to. It must be the same length as the samples in `table`.
The index must be the same on `table` and `grouping` but need not be
in the same order.
alpha : float, optional
Significance level for each of the statistical tests.
This can be anywhere between 0 and 1, exclusive.
tau : float, optional
A constant used to determine an appropriate cutoff.
A value close to zero indicates a conservative cutoff.
This can be anywhere between 0 and 1, exclusive.
theta : float, optional
Lower bound for the proportion for the W-statistic.
If all W-statistics are lower than theta, then no features
will be detected to be differentially significant.
This can be anywhere between 0 and 1, exclusive.
multiple_comparisons_correction : {None, 'holm-bonferroni'}, optional
The multiple comparison correction procedure to run. If None,
then no multiple comparison correction procedure will be run.
If 'holm-bonferroni' is specified, then the Holm-Bonferroni
procedure [1]_ will be run.
significance_test : function, optional
A statistical significance function to test for significance between
classes. This function must be able to accept at least two 1D
array_like arguments of floats and return a test statistic and a
p-value. By default ``scipy.stats.f_oneway`` is used.
percentiles : iterable of floats, optional
Percentile abundances to return for each feature in each group. By
default, will return the minimum, 25th percentile, median, 75th
percentile, and maximum abundances for each feature in each group.
Returns
-------
pd.DataFrame
A table of features, their W-statistics and whether the null hypothesis
is rejected.
`"W"` is the W-statistic, or number of features that a single feature
is tested to be significantly different against.
`"Reject null hypothesis"` indicates if feature is differentially
abundant across groups (`True`) or not (`False`).
pd.DataFrame
A table of features and their percentile abundances in each group. If
``percentiles`` is empty, this will be an empty ``pd.DataFrame``. The
rows in this object will be features, and the columns will be a
multi-index where the first index is the percentile, and the second
index is the group.
See Also
--------
multiplicative_replacement
scipy.stats.ttest_ind
scipy.stats.f_oneway
scipy.stats.wilcoxon
scipy.stats.kruskal
Notes
-----
The developers of this method recommend the following significance tests
([2]_, Supplementary File 1, top of page 11): if there are 2 groups, use
the standard parametric t-test (``scipy.stats.ttest_ind``) or
non-parametric Wilcoxon rank sum test (``scipy.stats.wilcoxon``).
If there are more than 2 groups, use parametric one-way ANOVA
(``scipy.stats.f_oneway``) or nonparametric Kruskal-Wallis
(``scipy.stats.kruskal``). Because one-way ANOVA is equivalent
to the standard t-test when the number of groups is two, we default to
``scipy.stats.f_oneway`` here, which can be used when there are two or
more groups. Users should refer to the documentation of these tests in
SciPy to understand the assumptions made by each test.
This method cannot handle any zero counts as input, since the logarithm
of zero cannot be computed. While this is an unsolved problem, many
studies, including [2]_, have shown promising results by adding
pseudocounts to all values in the matrix. In [2]_, a pseudocount of 0.001
was used, though the authors note that a pseudocount of 1.0 may also be
useful. Zero counts can also be addressed using the
``multiplicative_replacement`` method.
References
----------
.. [1] Holm, S. "A simple sequentially rejective multiple test procedure".
Scandinavian Journal of Statistics (1979), 6.
.. [2] Mandal et al. "Analysis of composition of microbiomes: a novel
method for studying microbial composition", Microbial Ecology in Health
& Disease, (2015), 26.
Examples
--------
First import all of the necessary modules:
>>> from skbio.stats.composition import ancom
>>> import pandas as pd
Now let's load in a DataFrame with 6 samples and 7 features (e.g.,
these may be bacterial OTUs):
>>> table = pd.DataFrame([[12, 11, 10, 10, 10, 10, 10],
... [9, 11, 12, 10, 10, 10, 10],
... [1, 11, 10, 11, 10, 5, 9],
... [22, 21, 9, 10, 10, 10, 10],
... [20, 22, 10, 10, 13, 10, 10],
... [23, 21, 14, 10, 10, 10, 10]],
... index=['s1', 's2', 's3', 's4', 's5', 's6'],
... columns=['b1', 'b2', 'b3', 'b4', 'b5', 'b6',
... 'b7'])
Then create a grouping vector. In this example, there is a treatment group
and a placebo group.
>>> grouping = pd.Series(['treatment', 'treatment', 'treatment',
... 'placebo', 'placebo', 'placebo'],
... index=['s1', 's2', 's3', 's4', 's5', 's6'])
Now run ``ancom`` to determine if there are any features that are
significantly different in abundance between the treatment and the placebo
groups. The first DataFrame that is returned contains the ANCOM test
results, and the second contains the percentile abundance data for each
feature in each group.
>>> ancom_df, percentile_df = ancom(table, grouping)
>>> ancom_df['W']
b1 0
b2 4
b3 0
b4 1
b5 1
b6 0
b7 1
Name: W, dtype: int64
The W-statistic is the number of features that a single feature is tested
to be significantly different against. In this scenario, `b2` was detected
to have significantly different abundances compared to four of the other
features. To summarize the results from the W-statistic, let's take a look
at the results from the hypothesis test. The `Reject null hypothesis`
column in the table indicates whether the null hypothesis was rejected,
and that a feature was therefore observed to be differentially abundant
across the groups.
>>> ancom_df['Reject null hypothesis']
b1 False
b2 True
b3 False
b4 False
b5 False
b6 False
b7 False
Name: Reject null hypothesis, dtype: bool
From this we can conclude that only `b2` was significantly different in
abundance between the treatment and the placebo. We still don't know, for
example, in which group `b2` was more abundant. We therefore may next be
interested in comparing the abundance of `b2` across the two groups.
We can do that using the second DataFrame that was returned. Here we
compare the median (50th percentile) abundance of `b2` in the treatment and
placebo groups:
>>> percentile_df[50.0].loc['b2']
Group
placebo 21.0
treatment 11.0
Name: b2, dtype: float64
We can also look at a full five-number summary for ``b2`` in the treatment
and placebo groups:
>>> percentile_df.loc['b2'] # doctest: +NORMALIZE_WHITESPACE
Percentile Group
0.0 placebo 21.0
25.0 placebo 21.0
50.0 placebo 21.0
75.0 placebo 21.5
100.0 placebo 22.0
0.0 treatment 11.0
25.0 treatment 11.0
50.0 treatment 11.0
75.0 treatment 11.0
100.0 treatment 11.0
Name: b2, dtype: float64
Taken together, these data tell us that `b2` is present in significantly
higher abundance in the placebo group samples than in the treatment group
samples.
"""
if not isinstance(table, pd.DataFrame):
raise TypeError('`table` must be a `pd.DataFrame`, '
'not %r.' % type(table).__name__)
if not isinstance(grouping, pd.Series):
raise TypeError('`grouping` must be a `pd.Series`,'
' not %r.' % type(grouping).__name__)
if np.any(table <= 0):
raise ValueError('Cannot handle zeros or negative values in `table`. '
'Use pseudocounts or ``multiplicative_replacement``.'
)
if not 0 < alpha < 1:
raise ValueError('`alpha`=%f is not within 0 and 1.' % alpha)
if not 0 < tau < 1:
raise ValueError('`tau`=%f is not within 0 and 1.' % tau)
if not 0 < theta < 1:
raise ValueError('`theta`=%f is not within 0 and 1.' % theta)
if multiple_comparisons_correction is not None:
if multiple_comparisons_correction != 'holm-bonferroni':
raise ValueError('%r is not an available option for '
'`multiple_comparisons_correction`.'
% multiple_comparisons_correction)
if (grouping.isnull()).any():
raise ValueError('Cannot handle missing values in `grouping`.')
if (table.isnull()).any().any():
raise ValueError('Cannot handle missing values in `table`.')
percentiles = list(percentiles)
for percentile in percentiles:
if not 0.0 <= percentile <= 100.0:
raise ValueError('Percentiles must be in the range [0, 100], %r '
'was provided.' % percentile)
duplicates = skbio.util.find_duplicates(percentiles)
if duplicates:
formatted_duplicates = ', '.join(repr(e) for e in duplicates)
raise ValueError('Percentile values must be unique. The following'
' value(s) were duplicated: %s.' %
formatted_duplicates)
groups = np.unique(grouping)
num_groups = len(groups)
if num_groups == len(grouping):
raise ValueError(
"All values in `grouping` are unique. This method cannot "
"operate on a grouping vector with only unique values (e.g., "
"there are no 'within' variance because each group of samples "
"contains only a single sample).")
if num_groups == 1:
raise ValueError(
"All values the `grouping` are the same. This method cannot "
"operate on a grouping vector with only a single group of samples"
"(e.g., there are no 'between' variance because there is only a "
"single group).")
if significance_test is None:
significance_test = scipy.stats.f_oneway
table_index_len = len(table.index)
grouping_index_len = len(grouping.index)
mat, cats = table.align(grouping, axis=0, join='inner')
if (len(mat) != table_index_len or len(cats) != grouping_index_len):
raise ValueError('`table` index and `grouping` '
'index must be consistent.')
n_feat = mat.shape[1]
_logratio_mat = _log_compare(mat.values, cats.values, significance_test)
logratio_mat = _logratio_mat + _logratio_mat.T
# Multiple comparisons
if multiple_comparisons_correction == 'holm-bonferroni':
logratio_mat = np.apply_along_axis(_holm_bonferroni,
1, logratio_mat)
np.fill_diagonal(logratio_mat, 1)
W = (logratio_mat < alpha).sum(axis=1)
c_start = W.max() / n_feat
if c_start < theta:
reject = np.zeros_like(W, dtype=bool)
else:
# Select appropriate cutoff
cutoff = c_start - np.linspace(0.05, 0.25, 5)
prop_cut = np.array([(W > n_feat*cut).mean() for cut in cutoff])
dels = np.abs(prop_cut - np.roll(prop_cut, -1))
dels[-1] = 0
if (dels[0] < tau) and (dels[1] < tau) and (dels[2] < tau):
nu = cutoff[1]
elif (dels[0] >= tau) and (dels[1] < tau) and (dels[2] < tau):
nu = cutoff[2]
elif (dels[1] >= tau) and (dels[2] < tau) and (dels[3] < tau):
nu = cutoff[3]
else:
nu = cutoff[4]
reject = (W >= nu*n_feat)
feat_ids = mat.columns
ancom_df = pd.DataFrame(
{'W': pd.Series(W, index=feat_ids),
'Reject null hypothesis': pd.Series(reject, index=feat_ids)})
if len(percentiles) == 0:
return ancom_df, pd.DataFrame()
else:
data = []
columns = []
for group in groups:
feat_dists = mat[cats == group]
for percentile in percentiles:
columns.append((percentile, group))
data.append(np.percentile(feat_dists, percentile, axis=0))
columns = pd.MultiIndex.from_tuples(columns,
names=['Percentile', 'Group'])
percentile_df = pd.DataFrame(
np.asarray(data).T, columns=columns, index=feat_ids)
return ancom_df, percentile_df
def _holm_bonferroni(p):
""" Performs Holm-Bonferroni correction for pvalues
to account for multiple comparisons
Parameters
----------
p: numpy.array
array of pvalues
Returns
-------
numpy.array
corrected pvalues
"""
K = len(p)
sort_index = -np.ones(K, dtype=np.int64)
sorted_p = np.sort(p)
sorted_p_adj = sorted_p*(K-np.arange(K))
for j in range(K):
idx = (p == sorted_p[j]) & (sort_index < 0)
num_ties = len(sort_index[idx])
sort_index[idx] = np.arange(j, (j+num_ties), dtype=np.int64)
sorted_holm_p = [min([max(sorted_p_adj[:k]), 1])
for k in range(1, K+1)]
holm_p = [sorted_holm_p[sort_index[k]] for k in range(K)]
return holm_p
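# Worked sketch (added commentary): for p = [0.01, 0.04, 0.03], K = 3, the
# sorted p-values are scaled by (K - rank): 0.01*3 = 0.03, 0.03*2 = 0.06,
# 0.04*1 = 0.04; taking the running maximum (capped at 1) gives
# [0.03, 0.06, 0.06], which maps back to the original order as
# [0.03, 0.06, 0.06].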
def _log_compare(mat, cats,
significance_test=scipy.stats.ttest_ind):
""" Calculates pairwise log ratios between all features and performs a
significance test (i.e. t-test) to determine if there is a significant
difference in feature ratios with respect to the variable of interest.
Parameters
----------
mat: np.array
rows correspond to samples and columns correspond to
features (i.e. OTUs)
cats: np.array, float
Vector of categories
significance_test: function
statistical test to run
Returns
-------
log_ratio : np.array
log ratio pvalue matrix
"""
r, c = mat.shape
log_ratio = np.zeros((c, c))
log_mat = np.log(mat)
cs = np.unique(cats)
def func(x):
return significance_test(*[x[cats == k] for k in cs])
for i in range(c-1):
ratio = (log_mat[:, i].T - log_mat[:, i+1:].T).T
m, p = np.apply_along_axis(func,
axis=0,
arr=ratio)
log_ratio[i, i+1:] = np.squeeze(np.array(p.T))
return log_ratio
def _gram_schmidt_basis(n):
"""
Builds clr transformed basis derived from
gram schmidt orthogonalization
Parameters
----------
n : int
Dimension of the Aitchison simplex
"""
basis = np.zeros((n, n-1))
for j in range(n-1):
i = j + 1
e = np.array([(1/i)]*i + [-1] +
[0]*(n-i-1))*np.sqrt(i/(i+1))
basis[:, j] = e
return basis.T
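# Note (added commentary): the rows returned here are expressed in clr
# coordinates; ilr() and ilr_inv() above map them into the simplex with
# clr_inv() before taking Aitchison inner products.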
@experimental(as_of="0.5.5")
def sbp_basis(sbp):
r"""
Builds an orthogonal basis from a sequential binary partition (SBP). As
explained in [1]_, the SBP is a hierarchical collection of binary
divisions of compositional parts. The child groups are divided again until
all groups contain a single part. The SBP can be encoded in a
:math:`(D - 1) \times D` matrix where, for each row, parts can be grouped
by -1 and +1 tags, and 0 for excluded parts. The `sbp_basis` method was
originally derived from function `gsi.buildilrBase()` found in the R
package `compositions` [2]_. The ith balance is computed as follows
.. math::
b_i = \sqrt{ \frac{r_i s_i}{r_i+s_i} }
\ln \left( \frac{g(x_{r_i})}{g(x_{s_i})} \right)
where :math:`b_i` is the ith balance corresponding to the ith row in the
SBP, :math:`r_i` and :math:`s_i` are the number of `+1` and
`-1` labels, respectively, in the ith row of the SBP, and where :math:`g(x) =
(\prod\limits_{i=1}^{D} x_i)^{1/D}` is the geometric mean of :math:`x`.
Parameters
----------
sbp: np.array, int
A contrast matrix, also known as a sequential binary partition, where
every row represents a partition between two groups of features. A part
labelled `+1` would correspond to that feature being in the numerator
of the given row partition, a part labelled `-1` would correspond to
features being in the denominator of that given row partition, and `0`
would correspond to features excluded in the row partition.
Returns
-------
numpy.array
An orthonormal basis in the Aitchison simplex
Examples
--------
>>> import numpy as np
>>> sbp = np.array([[1, 1,-1,-1,-1],
... [1,-1, 0, 0, 0],
... [0, 0, 1,-1,-1],
... [0, 0, 0, 1,-1]])
...
>>> sbp_basis(sbp)
array([[ 0.31209907, 0.31209907, 0.12526729, 0.12526729, 0.12526729],
[ 0.36733337, 0.08930489, 0.18112058, 0.18112058, 0.18112058],
[ 0.17882092, 0.17882092, 0.40459293, 0.11888261, 0.11888261],
[ 0.18112058, 0.18112058, 0.18112058, 0.36733337, 0.08930489]])
References
----------
.. [1] Parent, S.É., Parent, L.E., Egozcue, J.J., Rozane, D.E.,
Hernandes, A., Lapointe, L., Hébert-Gentile, V., Naess, K.,
Marchand, S., Lafond, J., Mattos, D., Barlow, P., Natale, W., 2013.
The plant ionome revisited by the nutrient balance concept.
Front. Plant Sci. 4, 39, http://dx.doi.org/10.3389/fpls.2013.00039.
.. [2] van den Boogaart, K. Gerald, Tolosana-Delgado, Raimon and Bren,
Matevz, 2014. `compositions`: Compositional Data Analysis. R package
version 1.40-1. https://CRAN.R-project.org/package=compositions.
"""
n_pos = (sbp == 1).sum(axis=1)
n_neg = (sbp == -1).sum(axis=1)
psi = np.zeros(sbp.shape)
for i in range(0, sbp.shape[0]):
psi[i, :] = sbp[i, :] * np.sqrt((n_neg[i] / n_pos[i])**sbp[i, :] /
np.sum(np.abs(sbp[i, :])))
return clr_inv(psi)
def _check_orthogonality(basis):
"""
Checks to see if basis is truly orthonormal in the
Aitchison simplex
Parameters
----------
basis: numpy.ndarray
basis in the Aitchison simplex
"""
basis = np.atleast_2d(basis)
if not np.allclose(inner(basis, basis), np.identity(len(basis)),
rtol=1e-4, atol=1e-6):
raise ValueError("Aitchison basis is not orthonormal")
|
bsd-3-clause
|
graalvm/mx
|
mx_benchplot.py
|
1
|
18183
|
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
from __future__ import print_function
import json
from argparse import ArgumentParser, REMAINDER
from argparse import RawTextHelpFormatter
import os.path
import sys
import mx
def suite_context_free(func):
"""
Decorator for commands that don't need a primary suite.
"""
mx._suite_context_free.append(func.__name__)
return func
def unique_prefix(s, choices):
r = [x for x in choices if x.startswith(s)]
return r[0] if len(r) == 1 else s
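# For example, unique_prefix('abs', ['percent', 'absolute', 'none']) returns
# 'absolute'; an ambiguous or unmatched prefix is returned unchanged.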
@suite_context_free
def benchtable(args):
parser = ArgumentParser(
prog="mx benchtable",
description=
"""
Generate a table of benchmark results for a set of JSON benchmark
result files. By default this emits a text formatted table with a
    column for each result and a column reporting the percentage change
relative to the first set of results. All files must come from the same
benchmark suite.
""",
formatter_class=RawTextHelpFormatter)
parser.add_argument('-b', '--benchmarks', help="""Restrict output to comma separated list of benchmarks.
This also controls the output order of the results.""", type=lambda s: s.split(','))
parser.add_argument('--format', action='store', choices=['text', 'csv', 'jira', 'markdown'], default='text', help='Set the output format. (Default: text)')
diff_choices = ['percent', 'absolute', 'none']
parser.add_argument('--diff', default='percent', choices=diff_choices, type=lambda s: unique_prefix(s, diff_choices),
help='Add a column reporting the difference relative the first score. (Default: percent)')
parser.add_argument('-f', '--file', default=None, help='Generate the table into a file.')
parser.add_argument('-S', '--samples', help="""\
Controls sampling of the data for the graphs. A positive number selects
the last n data points and a negative number selects the first n data points.
By default only report the last data point""", type=int, default=None)
parser.add_argument('--variance', action='store_true', help='Report the percentage variance of the scores.')
    parser.add_argument('-n', '--names', help='A list of comma separated names for each file. \n' +
'It must have the same number of entries as the files.', type=lambda s: s.split(','))
parser.add_argument('files', help='List of files', nargs=REMAINDER)
args = parser.parse_args(args)
if args.diff == 'none':
args.diff = None
benchmarks, results, names = extract_results(args.files, args.names, args.samples, args.benchmarks)
score_key = 'score'
variance_key = 'variance'
if args.samples:
score_key = 'trimmed_score'
variance_key = 'trimmed_variance'
handle = open(args.file, 'w') if args.file else sys.stdout
# build a collection of rows and compute padding required to align them
table = []
widths = []
specifiers = []
headers = []
for benchmark in benchmarks:
first_score = None
row = [benchmark]
specifiers = ['s']
headers = ['Benchmark']
first = True
for resultname, result in zip(names, results):
score = None
variance = None
scale = None
if result.get(benchmark):
score = result[benchmark][score_key]
variance = result[benchmark][variance_key]
if not result[benchmark]['higher']:
scale = -1
else:
scale = 1
if score:
if first:
first_score = score
row.append('%.2f' % score)
specifiers.append('s')
else:
row.append('N/A')
specifiers.append('s')
headers.append(resultname)
if args.variance:
if score:
row.append('%.2f%%' % variance)
else:
row.append('')
specifiers.append('s')
headers.append('Variance')
if not first and args.diff:
if score and first_score:
# if the first score is missing then don't report any change
if args.diff == 'percent':
row.append('%.2f%%' % ((score - first_score) * 100.0 * scale / first_score))
else:
row.append('%.2f' % ((score - first_score) * scale))
else:
row.append('')
specifiers.append('s')
if args.diff == 'percent':
headers.append('Change')
else:
headers.append('Delta')
first = False
table.append(row)
w = [max(len(h), len(('%' + spec) % (x))) for spec, x, h in zip(specifiers, row, headers)]
if len(widths) == 0:
widths = w
else:
widths = list(map(max, widths, w))
if args.format == 'text':
handle.write(' '.join(['%' + str(w) + 's' for w in widths]) % tuple(headers) + '\n')
format_string = ' '.join(['%' + str(w) + s for s, w in zip(specifiers, widths)])
for row in table:
handle.write(format_string % tuple(row) + '\n')
else:
header_sep = None
row_sep = None
header_terminator = ''
row_terminator = ''
header_separator = None
if args.format == 'jira':
header_sep = '||'
header_terminator = '||'
row_sep = '|'
row_terminator = '|'
elif args.format == 'csv':
header_sep = ','
row_sep = ','
elif args.format == 'markdown':
header_sep = '|'
row_sep = '|'
header_terminator = '|'
row_terminator = '|'
# Bitbucket server doesn't respect the alignment colons and
# not all markdown processors support tables.
header_separator = '---:'
else:
mx.abort('Unhandled format: ' + args.format)
handle.write(header_terminator + header_sep.join(headers) + header_terminator + '\n')
if header_separator:
handle.write(header_terminator + header_sep.join([header_separator for h in headers]) + header_terminator + '\n')
formats = ['%' + str(w) + s for s, w in zip(specifiers, widths)]
for row in table:
handle.write(row_terminator + row_sep.join([(f % r).strip() for r, f in zip(row, formats)]) + row_terminator + '\n')
if handle is not sys.stdout:
handle.close()
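# Example invocations (a sketch using only the options defined above; the JSON
# file names are placeholders):
#   mx benchtable baseline.json change.json
#   mx benchtable --format markdown --diff percent -n baseline,change baseline.json change.json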
@suite_context_free
def benchplot(args):
parser = ArgumentParser(
prog="mx benchplot",
description="""
Generate a plot of benchmark results for a set of JSON benchmark
result files using the Python package matplotlib. By default this
produces a bar chart comparing the final score in each result file.
The --warmup option can be used to graph the individual scores in
sequence. All files must come from the same benchmark suite.
""",
formatter_class=RawTextHelpFormatter)
parser.add_argument('-w', '--warmup', action='store_true', help='Plot a warmup graph')
parser.add_argument('-b', '--benchmarks', help="""Restrict output to comma separated list of benchmarks.
This also controls the output order of the results.""", type=lambda s: s.split(','))
parser.add_argument('-f', '--file', default=None,
help="""\
Generate the graph into a file. The extension will determine the format,
which must be .png, .svg or .pdf.""")
parser.add_argument('-S', '--samples', help="""\
Controls sampling of the data for the graphs. A positive number selects
the last n data points and a negative number selects the first n data points.
A warmup graph reports all data points by default and the bar chart reports
on the last point""", type=int, default=None)
parser.add_argument('-n', '--names', help="""Provide a list of names for the plot files.
Otherwise the names are derived from the filenames.""", type=lambda s: s.split(','))
parser.add_argument('-c', '--colors', help='Provide alternate colors for the results', type=lambda s: s.split(','))
parser.add_argument('-C', '--columns', help='The number of columns in a warmup graph. Defaults to 2.', type=int, default=None)
parser.add_argument('-L', '--legend-location', help='Location for the legend.', default='upper-right',
choices=['upper-right', 'upper-left', 'lower-right', 'lower-left'])
parser.add_argument('-P', '--page-size', help='The width and height of the page. Default to 11,8.5.', type=lambda s: [float(x) for x in s.split(',')], default=[11, 8.5])
parser.add_argument('files', help='List of JSON benchmark result files', nargs=REMAINDER)
args = parser.parse_args(args)
args.legend_location = args.legend_location.replace('-', ' ')
if not args.warmup:
if args.columns:
mx.abort('Option -C/--columns is only applicable to warmup graphs')
last_n = None
if not args.warmup:
if not args.samples:
# only report the final score in bar graph.
last_n = 1
else:
last_n = args.samples
try:
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
benchmarks, results, names = extract_results(args.files, args.names, last_n, args.benchmarks)
score_key = 'score'
scores_key = 'scores'
if last_n:
score_key = 'trimmed_score'
scores_key = 'trimmed_scores'
if not args.colors:
args.colors = color_cycle[0:len(names)]
if not args.columns:
args.columns = 2
if args.warmup:
index = 1
rows = 1
cols = 1
if len(benchmarks) > 1:
cols = args.columns
                rows = (len(benchmarks) + cols - 1) // cols  # integer division: plt.subplot expects an int row count
plt.figure(figsize=args.page_size, dpi=100)
for b in benchmarks:
ax = plt.subplot(rows, cols, index)
plt.title(b)
for resultname, result, color in zip(names, results, args.colors):
scores = []
xs = []
# missing results won't be plotted
if result.get(b):
scores = result[b][scores_key]
xs = range(1, len(scores) + 1)
if args.samples:
if args.samples > 0:
scores = scores[:args.samples]
xs = xs[:args.samples]
else:
scores = scores[args.samples:]
xs = xs[args.samples:]
plt.plot(xs, scores, label=resultname, color=color)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc=args.legend_location, fontsize='small', ncol=2)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_ylim(ymin=0)
index = index + 1
else:
_, ax = plt.subplots(figsize=args.page_size, dpi=100)
x = 0
bar_width = 0.25
spacing = 0.5
column_width = bar_width * len(names) + spacing
column_center = bar_width * ((len(names) - 1) / 2)
group = 0
rects = []
xticks = []
for name, color in zip(names, args.colors):
scores = []
xs = []
column = 0
xticks = []
for benchmark in benchmarks:
for resultname, result in zip(names, results):
if name == resultname:
if result.get(benchmark):
scores.append(result[benchmark][score_key])
xs.append(x + column * column_width + group * bar_width)
xticks.append(column * column_width + column_center)
column = column + 1
rects.append(ax.bar(xs, scores, width=bar_width, color=color))
group = group + 1
ax.legend(rects, names)
ax.set_xticks(xticks)
ax.set_xticklabels(benchmarks)
plt.tight_layout()
if args.file:
plt.savefig(args.file)
else:
plt.show()
except ImportError as e:
print(e)
mx.abort('matplotlib must be available to use benchplot. Install it using pip')
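# Example invocations (a sketch using only the options defined above; the JSON
# file names are placeholders):
#   mx benchplot baseline.json change.json
#   mx benchplot --warmup -C 2 -f warmup.png baseline.json change.json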
def extract_results(files, names, last_n=None, selected_benchmarks=None):
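    """Load benchmark result JSON files and aggregate their scores.
    Returns a tuple ``(benchmarks, results, names)``: ``benchmarks`` is the
    ordered list of benchmark names encountered, ``results`` holds one dict per
    input file mapping each benchmark to its collected ``scores`` plus derived
    ``score``, ``variance`` and ``count`` values (and ``trimmed_*`` variants
    when ``last_n`` is given), and ``names`` is the display name for each file.
    """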
if names:
if len(names) != len(files):
mx.abort('Wrong number of names specified: {} files but {} names.'.format(len(files), len(names)))
else:
names = [os.path.splitext(os.path.basename(x))[0] for x in files]
if len(names) != len(set(names)):
mx.abort('Base file names are not unique. Specify names using --names')
results = []
benchmarks = []
bench_suite = None
for filename, name in zip(files, names):
result = {}
results.append(result)
with open(filename) as fp:
data = json.load(fp)
if not isinstance(data, dict) or not data.get('queries'):
mx.abort('{} doesn\'t appear to be a benchmark results file'.format(filename))
for entry in data['queries']:
benchmark = entry['benchmark']
if benchmark not in benchmarks:
benchmarks.append(benchmark)
if bench_suite is None:
bench_suite = entry['bench-suite']
else:
if bench_suite != entry['bench-suite']:
mx.abort("File '{}' contains bench-suite '{}' but expected '{}'.".format(filename, entry['bench-suite'], bench_suite))
score = entry['metric.value']
iteration = entry['metric.iteration']
scores = result.get(benchmark)
if not scores:
higher = entry['metric.better'] == 'higher'
result[benchmark] = {'scores': [], 'higher': higher, 'name': name}
scores = result.get(benchmark)
if entry['metric.name'] == 'warmup':
score_list = scores['scores']
while len(score_list) < iteration + 1:
score_list.append(None)
score_list[iteration] = score
elif entry['metric.name'] == 'final-time':
# ignore this value
pass
elif entry['metric.name'] == 'time' or entry['metric.name'] == 'throughput':
scores['last-score'] = score
for _, entry in result.items():
scores = entry['scores']
if entry.get('last-score'):
scores.append(entry['last-score'])
entry['scores'] = scores
if last_n and len(entry['scores']) >= abs(last_n):
if last_n < 0:
entry['trimmed_scores'] = entry['scores'][:-last_n]
else:
entry['trimmed_scores'] = entry['scores'][-last_n:]
entry['trimmed_count'] = len(entry['trimmed_scores'])
entry['trimmed_score'] = sum(entry['trimmed_scores']) / len(entry['trimmed_scores'])
entry['count'] = len(entry['scores'])
entry['score'] = sum(entry['scores']) / len(entry['scores'])
# Compute a variance value. This is a percentage variance relative to the average score
# which is easier to interpret than a raw number.
for _, entry in result.items():
variance = 0
for score in entry['scores']:
variance = variance + (score - entry['score']) * (score - entry['score'])
entry['variance'] = ((variance / entry['count']) / entry['score'])
if entry.get('trimmed_scores'):
variance = 0
for score in entry['trimmed_scores']:
variance = variance + (score - entry['trimmed_score']) * (score - entry['trimmed_score'])
entry['trimmed_variance'] = ((variance / entry['trimmed_count']) / entry['trimmed_score'])
if selected_benchmarks:
unknown_benchmarks = set(selected_benchmarks) - set(benchmarks)
if len(unknown_benchmarks) != 0:
mx.abort('Unknown benchmarks selected: {}\nAvailable benchmarks are: {}'.format(','.join(unknown_benchmarks), ','.join(benchmarks)))
benchmarks = selected_benchmarks
return benchmarks, results, names
|
gpl-2.0
|
lsst-ts/ts_wep
|
tests/task/test_generateDonutCatalogBase.py
|
1
|
4671
|
# This file is part of ts_wep.
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import unittest
import numpy as np
import lsst.geom
from lsst.daf import butler as dafButler
from lsst.ts.wep.Utility import getModulePath
from lsst.ts.wep.task.GenerateDonutCatalogBase import (
GenerateDonutCatalogBaseTask,
GenerateDonutCatalogBaseConfig,
)
class TestGenerateDonutCatalogBase(unittest.TestCase):
def setUp(self):
self.config = GenerateDonutCatalogBaseConfig()
self.task = GenerateDonutCatalogBaseTask(config=self.config, name="Base Task")
moduleDir = getModulePath()
self.testDataDir = os.path.join(moduleDir, "tests", "testData")
self.repoDir = os.path.join(self.testDataDir, "gen3TestRepo")
self.centerRaft = ["R22_S10", "R22_S11"]
self.butler = dafButler.Butler(self.repoDir)
self.registry = self.butler.registry
def _getRefCat(self):
refCatList = []
datasetGenerator = self.registry.queryDatasets(
datasetType="cal_ref_cat", collections=["refcats"]
).expanded()
for ref in datasetGenerator:
refCatList.append(self.butler.getDeferred(ref, collections=["refcats"]))
return refCatList
    def testValidateConfigs(self):
self.config.boresightRa = 0.03
self.config.boresightDec = -0.02
self.config.boresightRotAng = 90.0
self.config.filterName = "r"
self.task = GenerateDonutCatalogBaseTask(config=self.config)
self.assertEqual(self.task.boresightRa, 0.03)
self.assertEqual(self.task.boresightDec, -0.02)
self.assertEqual(self.task.boresightRotAng, 90.0)
self.assertEqual(self.task.filterName, "r")
def testFilterResults(self):
refCatList = self._getRefCat()
refCat = self.butler.get(
"cal_ref_cat", dataId=refCatList[0].dataId, collections=["refcats"]
)
testDataFrame = refCat.asAstropy().to_pandas()
filteredDataFrame = self.task.filterResults(testDataFrame)
np.testing.assert_array_equal(filteredDataFrame, testDataFrame)
def testGetRefObjLoader(self):
refCatList = self._getRefCat()
refObjLoader = self.task.getRefObjLoader(refCatList)
# Check that our refObjLoader loads the available objects
# within a given search radius
donutCatSmall = refObjLoader.loadSkyCircle(
lsst.geom.SpherePoint(0.0, 0.0, lsst.geom.degrees),
lsst.geom.Angle(0.5, lsst.geom.degrees),
filterName="g",
)
self.assertEqual(len(donutCatSmall.refCat), 8)
donutCatFull = refObjLoader.loadSkyCircle(
lsst.geom.SpherePoint(0.0, 0.0, lsst.geom.degrees),
lsst.geom.Angle(2.5, lsst.geom.degrees),
filterName="g",
)
self.assertEqual(len(donutCatFull.refCat), 24)
def testDonutCatalogListToDataFrame(self):
refCatList = self._getRefCat()
refObjLoader = self.task.getRefObjLoader(refCatList)
# Check that our refObjLoader loads the available objects
# within a given search radius
donutCatSmall = refObjLoader.loadSkyCircle(
lsst.geom.SpherePoint(0.0, 0.0, lsst.geom.degrees),
lsst.geom.Angle(0.5, lsst.geom.degrees),
filterName="g",
)
fieldObjects = self.task.donutCatalogListToDataFrame(
[donutCatSmall.refCat, donutCatSmall.refCat], ["R22_S99", "R22_S99"]
)
self.assertEqual(len(fieldObjects), 16)
self.assertCountEqual(
fieldObjects.columns,
[
"coord_ra",
"coord_dec",
"centroid_x",
"centroid_y",
"source_flux",
"detector",
],
)
|
gpl-3.0
|
slinderman/pyhawkes
|
test/test_sbm_gibbs.py
|
1
|
2377
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import adjusted_mutual_info_score, adjusted_rand_score
from pyhawkes.internals.network import StochasticBlockModel
from pyhawkes.models import DiscreteTimeNetworkHawkesModelSpikeAndSlab
from pybasicbayes.util.text import progprint_xrange
def test_gibbs_sbm(seed=None):
"""
Create a discrete time Hawkes model and generate from it.
:return:
"""
if seed is None:
seed = np.random.randint(2**32)
print("Setting seed to ", seed)
np.random.seed(seed)
C = 2
K = 100
    c = np.arange(C).repeat(int(np.ceil(K/float(C))))[:K]
T = 1000
dt = 1.0
B = 3
# Generate from a true model
true_p = np.random.rand(C,C) * 0.25
true_network = StochasticBlockModel(K, C, c=c, p=true_p, v=10.0)
true_model = \
DiscreteTimeNetworkHawkesModelSpikeAndSlab(
K=K, dt=dt, B=B, network=true_network)
S,R = true_model.generate(T)
# Plot the true network
plt.ion()
true_im = true_model.plot_adjacency_matrix()
plt.pause(0.001)
# Make a new model for inference
test_network = StochasticBlockModel(K, C, beta=1./K)
test_model = \
DiscreteTimeNetworkHawkesModelSpikeAndSlab(
K=K, dt=dt, B=B, network=test_network)
test_model.add_data(S)
# Gibbs sample
N_samples = 100
c_samples = []
lps = []
for itr in progprint_xrange(N_samples):
c_samples.append(test_network.c.copy())
lps.append(test_model.log_probability())
# Resample the network only
test_model.network.resample((true_model.weight_model.A,
true_model.weight_model.W))
c_samples = np.array(c_samples)
plt.ioff()
# Compute sample statistics for second half of samples
print("True c: ", true_model.network.c)
print("Test c: ", c_samples[-10:, :])
# Compute the adjusted mutual info score of the clusterings
amis = []
arss = []
for c in c_samples:
amis.append(adjusted_mutual_info_score(true_model.network.c, c))
arss.append(adjusted_rand_score(true_model.network.c, c))
plt.figure()
plt.plot(np.arange(N_samples), amis, '-r')
plt.plot(np.arange(N_samples), arss, '-b')
plt.xlabel("Iteration")
plt.ylabel("Clustering score")
plt.show()
test_gibbs_sbm()
|
mit
|
ChinaQuants/blaze
|
blaze/compute/tests/test_comprehensive.py
|
11
|
4570
|
from __future__ import absolute_import, division, print_function
import numpy as np
from pandas import DataFrame
from odo import resource, into
from datashape.predicates import isscalar, iscollection, isrecord
from blaze.expr import symbol, by
from blaze.interactive import Data
from blaze.compute import compute
from blaze.expr.functions import sin, exp
sources = []
t = symbol('t', 'var * {amount: int64, id: int64, name: string}')
L = [[ 100, 1, 'Alice'],
[ 200, 2, 'Bob'],
[ 300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[ 500, 5, 'Edith']]
df = DataFrame(L, columns=['amount', 'id', 'name'])
x = into(np.ndarray, df)
sources = [df, x]
try:
import sqlalchemy
sql = resource('sqlite:///:memory:::accounts', dshape=t.dshape)
into(sql, L)
sources.append(sql)
except:
sql = None
try:
import bcolz
bc = into(bcolz.ctable, df)
sources.append(bc)
except ImportError:
bc = None
try:
import pymongo
except ImportError:
pymongo = mongo = None
if pymongo:
try:
db = pymongo.MongoClient().db
try:
coll = db._test_comprehensive
except AttributeError:
coll = db['_test_comprehensive']
coll.drop()
mongo = into(coll, df)
sources.append(mongo)
except pymongo.errors.ConnectionFailure:
mongo = None
# {expr: [list-of-exclusions]}
expressions = {
t: [],
t['id']: [],
abs(t['amount']): [],
t.id.max(): [],
t.amount.sum(): [],
t.amount.sum(keepdims=True): [],
t.amount.count(keepdims=True): [],
t.amount.nunique(keepdims=True): [mongo],
t.amount.nunique(): [],
t.amount.head(): [],
t.amount + 1: [mongo],
sin(t.amount): [sql, mongo], # sqlite doesn't support trig
exp(t.amount): [sql, mongo],
t.amount > 50: [mongo],
t[t.amount > 50]: [],
t.like(name='Alic*'): [],
t.sort('name'): [bc],
t.sort('name', ascending=False): [bc],
t.head(3): [],
t.name.distinct(): [],
t[t.amount > 50]['name']: [], # odd ordering issue
t.id.map(lambda x: x + 1, schema='int64', name='id'): [sql, mongo],
t[t.amount > 50]['name']: [],
by(t.name, total=t.amount.sum()): [],
by(t.id, count=t.id.count()): [],
by(t[['id', 'amount']], count=t.id.count()): [],
by(t[['id', 'amount']], total=(t.amount + 1).sum()): [mongo],
by(t[['id', 'amount']], n=t.name.nunique()): [mongo, bc],
by(t.id, count=t.amount.count()): [],
by(t.id, n=t.id.nunique()): [mongo, bc],
# by(t, count=t.count()): [],
# by(t.id, count=t.count()): [],
t[['amount', 'id']]: [x], # https://github.com/numpy/numpy/issues/3256
t[['id', 'amount']]: [x, bc], # bcolz sorting
t[0]: [sql, mongo, bc],
t[::2]: [sql, mongo, bc],
t.id.utcfromtimestamp: [sql],
t.distinct().nrows: [],
t.nelements(axis=0): [],
t.nelements(axis=None): [],
t.amount.truncate(200): [sql]
}
base = df
def df_eq(a, b):
return (list(a.columns) == list(b.columns)
# and list(a.dtypes) == list(b.dtypes)
and into(set, into(list, a)) == into(set, into(list, b)))
def typename(obj):
return type(obj).__name__
def test_base():
for expr, exclusions in expressions.items():
if iscollection(expr.dshape):
model = into(DataFrame, into(np.ndarray, expr._subs({t: Data(base, t.dshape)})))
else:
model = compute(expr._subs({t: Data(base, t.dshape)}))
print('\nexpr: %s\n' % expr)
for source in sources:
if id(source) in map(id, exclusions):
continue
print('%s <- %s' % (typename(model), typename(source)))
T = Data(source)
if iscollection(expr.dshape):
result = into(type(model), expr._subs({t: T}))
if isscalar(expr.dshape.measure):
assert set(into(list, result)) == set(into(list, model))
else:
assert df_eq(result, model)
elif isrecord(expr.dshape):
result = compute(expr._subs({t: T}))
assert into(tuple, result) == into(tuple, model)
else:
result = compute(expr._subs({t: T}))
try:
result = result.scalar()
except AttributeError:
pass
assert result == model
|
bsd-3-clause
|
SigmaQuan/cuda-convnet2
|
shownet.py
|
180
|
18206
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
from StringIO import StringIO  # used by --save-preds to buffer PNG images before adding them to the tar file
# NOTE: the --save-preds path also calls a 'gfile' API that is not explicitly imported in this file.
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
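# Example invocations (a sketch: the checkpoint path, cost name and layer name
# are placeholders that depend on the trained net, and the load-file option is
# inherited from the ConvNet options parser):
#   python shownet.py --load-file=/path/to/ConvNet_checkpoint --show-cost=logprob
#   python shownet.py --load-file=/path/to/ConvNet_checkpoint --show-filters=conv1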
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
|
apache-2.0
|
semio/ddf_utils
|
ddf_utils/factory/clio_infra.py
|
1
|
2872
|
# -*- coding: utf-8 -*-
"""functions for scraping data from clio infra website.
Source link: `Clio-infra website`_
.. _`Clio-infra website`: https://www.clio-infra.eu/index.html
"""
import os.path as osp
import pandas as pd
from lxml import etree
import requests
from urllib.parse import urljoin
from .common import DataFactory
class ClioInfraLoader(DataFactory):
url = 'https://clio-infra.eu/index.html'
def _get_home_page(self, url):
response = requests.get(url, verify=False)
content = response.content
tree = etree.fromstring(content, parser=etree.HTMLParser())
return tree
def has_newer_source(self, ver):
print('there is no version info in this site.')
raise NotImplementedError
def load_metadata(self):
tree = self._get_home_page(self.url)
elem = tree.xpath('//div[@class="col-sm-4"]/div[@class="list-group"]/p[@class="list-group-item"]')
res1 = {}
res2 = {}
for e in elem:
try:
name = e.find('a').text
link = e.find('*/a').attrib['href']
if '../data' in link: # it's indicator file
res1[name] = link
else: # it's country file
res2[name] = link
except: # FIXME: add exception class here.
name = e.text
res2[name] = ''
# create the metadata dataframe
md_dataset = pd.DataFrame(columns=['name', 'url', 'type'])
md_dataset['name'] = list(res1.keys())
md_dataset['url'] = list(res1.values())
md_dataset['type'] = 'dataset'
md_country = pd.DataFrame(columns=['name', 'url', 'type'])
md_country['name'] = list(res2.keys())
md_country['url'] = list(res2.values())
md_country['type'] = 'country'
self.metadata = pd.concat([md_dataset, md_country], ignore_index=True)
return self.metadata
def bulk_download(self, out_dir, data_type=None):
if self.metadata is None:
self.load_metadata()
metadata = self.metadata
if data_type:
to_download = metadata[metadata['type'] == data_type]
else:
to_download = metadata
for i, row in to_download.iterrows():
name = row['name']
path = row['url']
file_url = urljoin(self.url, path)
res = requests.get(file_url, stream=True, verify=False)
fn = osp.join(out_dir, f'{name}.xlsx')
print("downloading {} to {}".format(file_url, fn))
with open(fn, 'wb') as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
f.close()
print('Done downloading source files.')
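# Minimal usage sketch (assuming DataFactory needs no constructor arguments):
#   loader = ClioInfraLoader()
#   loader.load_metadata()                           # scrape the dataset/country listing
#   loader.bulk_download('./source', data_type='dataset')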
|
mit
|
shusenl/scikit-learn
|
sklearn/linear_model/coordinate_descent.py
|
59
|
76336
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. For
        ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
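# Note: the largest alpha in the grid above is max_j ||x_j^T y|| / (n_samples * l1_ratio)
# (after the optional centering/normalization), i.e. the smallest penalty for which all
# coordinate descent coefficients are exactly zero; the remaining alphas are log-spaced
# down to eps times that value.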
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already float64 Fortran ordered when bypassing
# checks
check_input = 'check_input' not in params or params['check_input']
    pre_fit = 'pre_fit' not in params or params['pre_fit']
if check_input:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F',
copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if pre_fit:
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False,
copy=False, Xy_precompute_order='F')
if alphas is None:
        # No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, 'csc', dtype=np.float64,
order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
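    For example, ``a = 0.7`` and ``b = 0.3`` correspond to ``alpha = 1.0`` and
    ``l1_ratio = 0.7``.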
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
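    Examples
    --------
    A minimal usage sketch on assumed toy data (illustrative values only):
    >>> from sklearn.linear_model import ElasticNet
    >>> clf = ElasticNet(alpha=0.1, l1_ratio=0.7)
    >>> clf = clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
    >>> pred = clf.predict([[1.5, 1.5]])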
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
        Coordinate descent is an algorithm that considers each column of
        data at a time, hence it will automatically convert the X input
        to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
            warnings.warn("Setting precompute to 'auto' was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
# We expect X and y to be already float64 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F',
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False, Xy_precompute_order='F')
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False,
pre_fit=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
        # Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was"
                                " passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
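    Examples
    --------
    A minimal usage sketch on assumed random data; ``alpha_`` then holds the
    regularization strength selected by cross-validation:
    >>> import numpy as np
    >>> from sklearn.linear_model import LassoCV
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 5)
    >>> y = X[:, 0] + 0.01 * rng.randn(100)
    >>> reg = LassoCV(cv=3).fit(X, y)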
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
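    Examples
    --------
    A minimal usage sketch on assumed random data, passing a list of
    ``l1_ratio`` values to be tested; ``alpha_`` and ``l1_ratio_`` then hold
    the values selected by cross-validation:
    >>> import numpy as np
    >>> from sklearn.linear_model import ElasticNetCV
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 5)
    >>> y = X[:, 0] - 2 * X[:, 1] + 0.01 * rng.randn(100)
    >>> reg = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1], cv=3)
    >>> reg = reg.fit(X, y)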
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For ``l1_ratio = 1`` the penalty is an L1/L2 penalty. For ``l1_ratio = 0``
        it is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
        Coordinate descent is an algorithm that considers each column of
        data at a time, hence it will automatically convert the X input
        to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For ``l1_ratio = 1`` the penalty is an L1/L2 penalty. For ``l1_ratio = 0``
        it is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
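    Examples
    --------
    A minimal usage sketch on assumed random data with two tasks; ``coef_``
    then has shape (n_tasks, n_features):
    >>> import numpy as np
    >>> from sklearn.linear_model import MultiTaskLassoCV
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(60, 4)
    >>> Y = np.dot(X, rng.randn(4, 2)) + 0.01 * rng.randn(60, 2)
    >>> reg = MultiTaskLassoCV(cv=3).fit(X, Y)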
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
|
bsd-3-clause
|
nlproc/splunkml
|
bin/mcpredict.py
|
1
|
2357
|
#!env python
import os
import sys
sys.path.append(
os.path.join(
os.environ.get( "SPLUNK_HOME", "/opt/splunk/6.1.3" ),
"etc/apps/framework/contrib/splunk-sdk-python/1.3.0",
)
)
from collections import Counter, OrderedDict
from math import log
from nltk import tokenize
import execnet
import json
from splunklib.searchcommands import Configuration, Option
from splunklib.searchcommands import dispatch, validators
from remote_commands import OptionRemoteStreamingCommand, ValidateLocalFile
@Configuration(clear_required_fields=False)
class MCPredict(OptionRemoteStreamingCommand):
model = Option(require=True, validate=ValidateLocalFile(mode='r',extension="pkl",subdir='classifiers',nohandle=True))
code = """
import os, sys, itertools, collections, numbers
try:
import cStringIO as StringIO
except:
import StringIO
import numpy as np
import scipy.sparse as sp
from multiclassify import process_records
from gensim.models import LsiModel, TfidfModel, LdaModel
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.externals import joblib
if __name__ == "__channelexec__":
args = channel.receive()
records = []
for record in channel:
if not record:
break
records.append(record)
if records:
records = np.array(records)
# Try loading existing model
try:
model = joblib.load(args['model'])
encoder = model['encoder']
est = model['est']
target = model['target']
fields = model['fields']
if model.get('text'):
if model['text'] == 'lsi':
textmodel = LsiModel.load(args['model'].replace(".pkl",".%s" % model['text']))
elif model['text'] == 'tfidf':
textmodel = TfidfModel.load(args['model'].replace(".pkl",".%s" % model['text']))
else:
textmodel = model['text']
except Exception as e:
print >> sys.stderr, "ERROR", e
channel.send({ 'error': "Couldn't find model %s" % args['model']})
else:
X, y_labels, textmodel = process_records(records, fields, target, textmodel=textmodel)
print >> sys.stderr, X.shape
y = est.predict(X)
y_labels = encoder.inverse_transform(y)
for i, record in enumerate(records):
record['%s_predicted' % target] = y_labels.item(i)
channel.send(record)
"""
def __dir__(self):
return ['model']
dispatch(MCPredict, sys.argv, sys.stdin, sys.stdout, __name__)
|
apache-2.0
|
PatrickChrist/scikit-learn
|
examples/applications/plot_tomography_l1_reconstruction.py
|
204
|
5442
|
"""
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, which uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
|
bsd-3-clause
|
matrogers/pylearn2
|
pylearn2/train_extensions/live_monitoring.py
|
30
|
11536
|
"""
Training extension for allowing querying of monitoring values while an
experiment executes.
"""
__authors__ = "Dustin Webb"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Dustin Webb"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import copy
try:
import zmq
zmq_available = True
except ImportError:
zmq_available = False
try:
import matplotlib.pyplot as plt
pyplot_available = True
except ImportError:
pyplot_available = False
from functools import wraps
from pylearn2.monitor import Monitor
from pylearn2.train_extensions import TrainExtension
class LiveMonitorMsg(object):
"""
Base class that defines the required interface for all Live Monitor
messages.
"""
response_set = False
def get_response(self):
"""
Method that instantiates a response message for a given request
message. It is not necessary to implement this function on response
messages.
"""
raise NotImplementedError('get_response is not implemented.')
class ChannelListResponse(LiveMonitorMsg):
"""
A message containing the list of channels being monitored.
"""
pass
class ChannelListRequest(LiveMonitorMsg):
"""
A message indicating a request for a list of channels being monitored.
"""
@wraps(LiveMonitorMsg.get_response)
def get_response(self):
return ChannelListResponse()
class ChannelsResponse(LiveMonitorMsg):
"""
A message containing monitoring data related to the channels specified.
Data can be requested for all epochs or select epochs.
Parameters
----------
channel_list : list
A list of the channels for which data has been requested.
start : int
The starting epoch for which data should be returned.
end : int
The epoch after which data should be returned.
step : int
The number of epochs to be skipped between data points.
"""
def __init__(self, channel_list, start, end, step=1):
assert(
isinstance(channel_list, list)
and len(channel_list) > 0
)
self.channel_list = channel_list
assert(start >= 0)
self.start = start
self.end = end
assert(step > 0)
self.step = step
class ChannelsRequest(LiveMonitorMsg):
"""
A message for requesting data related to the channels specified.
Parameters
----------
channel_list : list
A list of the channels for which data has been requested.
start : int
The starting epoch for which data should be returned.
end : int
The epoch after which data should be returned.
step : int
The number of epochs to be skipped between data points.
"""
def __init__(self, channel_list, start=0, end=-1, step=1):
assert(
isinstance(channel_list, list)
and len(channel_list) > 0
)
self.channel_list = channel_list
assert(start >= 0)
self.start = start
self.end = end
assert(step > 0)
self.step = step
@wraps(LiveMonitorMsg.get_response)
def get_response(self):
return ChannelsResponse(
self.channel_list,
self.start,
self.end,
self.step
)
class LiveMonitoring(TrainExtension):
"""
A training extension for remotely monitoring and filtering the channels
being monitored in real time. PyZMQ must be installed for this extension
to work.
Parameters
----------
address : string
        The IP address of the interface on which the monitor should listen.
    req_port : int
        The port number to be used to service requests.
pub_port : int
The port number to be used to publish updates.
"""
def __init__(self, address='*', req_port=5555, pub_port=5556):
if not zmq_available:
raise ImportError('zeromq needs to be installed to '
'use this module.')
self.address = 'tcp://%s' % address
assert(req_port != pub_port)
assert(req_port > 1024 and req_port < 65536)
self.req_port = req_port
assert(pub_port > 1024 and pub_port < 65536)
self.pub_port = pub_port
address_template = self.address + ':%d'
self.context = zmq.Context()
self.req_sock = None
if self.req_port > 0:
self.req_sock = self.context.socket(zmq.REP)
self.req_sock.bind(address_template % self.req_port)
self.pub_sock = None
if self.pub_port > 0:
self.pub_sock = self.context.socket(zmq.PUB)
            self.pub_sock.bind(address_template % self.pub_port)
# Tracks the number of times on_monitor has been called
self.counter = 0
@wraps(TrainExtension.on_monitor)
def on_monitor(self, model, dataset, algorithm):
monitor = Monitor.get_monitor(model)
try:
rsqt_msg = self.req_sock.recv_pyobj(flags=zmq.NOBLOCK)
# Determine what type of message was received
rsp_msg = rsqt_msg.get_response()
if isinstance(rsp_msg, ChannelListResponse):
rsp_msg.data = list(monitor.channels.keys())
if isinstance(rsp_msg, ChannelsResponse):
channel_list = rsp_msg.channel_list
if (
not isinstance(channel_list, list)
or len(channel_list) == 0
):
                    channel_list = []
                    result = TypeError(
                        'ChannelsResponse requires a list of channels.'
                    )
                else:
                    result = {}
for channel_name in channel_list:
if channel_name in monitor.channels.keys():
chan = copy.deepcopy(
monitor.channels[channel_name]
)
end = rsp_msg.end
if end == -1:
end = len(chan.batch_record)
# TODO copying and truncating the records individually
# like this is brittle. Is there a more robust
# solution?
chan.batch_record = chan.batch_record[
rsp_msg.start:end:rsp_msg.step
]
chan.epoch_record = chan.epoch_record[
rsp_msg.start:end:rsp_msg.step
]
chan.example_record = chan.example_record[
rsp_msg.start:end:rsp_msg.step
]
chan.time_record = chan.time_record[
rsp_msg.start:end:rsp_msg.step
]
chan.val_record = chan.val_record[
rsp_msg.start:end:rsp_msg.step
]
result[channel_name] = chan
else:
result[channel_name] = KeyError(
                            'Invalid channel: %s' % channel_name
)
rsp_msg.data = result
self.req_sock.send_pyobj(rsp_msg)
except zmq.Again:
pass
self.counter += 1
class LiveMonitor(object):
"""
    A utility class for requesting data from a LiveMonitoring training
extension.
Parameters
----------
address : string
The IP address on which a LiveMonitoring process is listening.
req_port : int
The port number on which a LiveMonitoring process is listening.
"""
def __init__(self, address='127.0.0.1', req_port=5555):
"""
"""
if not zmq_available:
raise ImportError('zeromq needs to be installed to '
'use this module.')
self.address = 'tcp://%s' % address
assert(req_port > 0)
self.req_port = req_port
self.context = zmq.Context()
self.req_sock = self.context.socket(zmq.REQ)
self.req_sock.connect(self.address + ':' + str(self.req_port))
self.channels = {}
def list_channels(self):
"""
Returns a list of the channels being monitored.
"""
self.req_sock.send_pyobj(ChannelListRequest())
return self.req_sock.recv_pyobj()
def update_channels(self, channel_list, start=-1, end=-1, step=1):
"""
        Retrieves data for a specified set of channels and combines that data
        with any previously retrieved data.
        This assumes all the channels have the same number of values. It is
        unclear whether this is a reasonable assumption. If they do not have
        the same number of values then it may request too much or too little
        data, leading to duplicated data or holes in the data respectively.
        This could be made more robust by retrieving all the data for all of
        the channels.
Parameters
----------
channel_list : list
A list of the channels for which data should be requested.
        start : int
            The starting epoch for which data should be requested.
        end : int
            The epoch up to which data should be requested.
step : int
The number of epochs to be skipped between data points.
"""
assert((start == -1 and end == -1) or end > start)
if start == -1:
start = 0
if len(self.channels.keys()) > 0:
channel_name = list(self.channels.keys())[0]
start = len(self.channels[channel_name].epoch_record)
self.req_sock.send_pyobj(ChannelsRequest(
channel_list, start=start, end=end, step=step
))
rsp_msg = self.req_sock.recv_pyobj()
if isinstance(rsp_msg.data, Exception):
raise rsp_msg.data
for channel in rsp_msg.data.keys():
rsp_chan = rsp_msg.data[channel]
if isinstance(rsp_chan, Exception):
raise rsp_chan
if channel not in self.channels.keys():
self.channels[channel] = rsp_chan
else:
chan = self.channels[channel]
chan.batch_record += rsp_chan.batch_record
chan.epoch_record += rsp_chan.epoch_record
chan.example_record += rsp_chan.example_record
chan.time_record += rsp_chan.time_record
chan.val_record += rsp_chan.val_record
def follow_channels(self, channel_list):
"""
Tracks and plots a specified set of channels in real time.
Parameters
----------
channel_list : list
A list of the channels for which data has been requested.
"""
if not pyplot_available:
raise ImportError('pyplot needs to be installed for '
'this functionality.')
plt.clf()
plt.ion()
while True:
self.update_channels(channel_list)
plt.clf()
for channel_name in self.channels:
plt.plot(
self.channels[channel_name].epoch_record,
self.channels[channel_name].val_record,
label=channel_name
)
plt.legend()
            plt.draw()
            plt.pause(0.05)
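# A minimal usage sketch (assumes a LiveMonitoring extension is attached to a
# running experiment on the default ports; the channel name is hypothetical):
#
#     monitor = LiveMonitor()
#     print(monitor.list_channels().data)
#     monitor.follow_channels(['train_objective'])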
|
bsd-3-clause
|
ShujiaHuang/AsmVar
|
src/AsmvarGenotype/GMM/SVGenotyping.py
|
2
|
25369
|
"""
===================================================
Use Gaussian Mixture Model to estimate SV genotypes
from population data in VCF format.
===================================================
Author : Shujia Huang & Siyang Liu
Date : 2013-12-14 10:48:02
Modify : 2013-12-16 13:58:57 Debugging
"""
import optparse
import os
import re
import string
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import GMM2D as mixture
def GetPPrate(fmat, formatInfo):
trainIndx, ppr, pp = [], [], []
theta = 1.0
for i, t in enumerate(formatInfo):
# 0/1:52,3:6,13,0,0,0,1,0:6,14:20-121512-121512:13:INS
fi = t.split(':')
if t != './.':
rr = string.atof(fi[fmat['AA']].split(',')[0])
aa = string.atof(fi[fmat['AA']].split(',')[1])
else:
rr, aa = 0, 0
r = theta
if rr + aa > 0:
r = rr/(rr + aa)
trainIndx.append(i)
ppr.append([r])
pp.append([rr, aa])
#return range(len(ppr)), np.array(ppr), np.array(pp) # All for training!
return trainIndx, np.array(ppr), np.array(pp)
###################
def UpdateInfoFromGMM(gmm, ppr, grey, red, green, blue, data, sam2col, family):
# gmm : It's the GMM model
# data: It's the vcf line
child = []
for k,v in family.items():
if k not in sam2col or v[0] not in sam2col or v[1] not in sam2col: continue
child.append(sam2col[k])
child = set(child)
# determine the relationship of predictLabel and the SV genotype by the result of gmm.means_
c2g = gmm.Label2Genotype()
g2c = {v:k for k,v in c2g.items()}
if not gmm.converged_:
if data[6] == '.' or data[6] == 'PASS':
data[6] = 'FALSE_GENOTYPE'
else:
data[6] = 'FALSE_GENOTYPE;' + data[6]
predict = gmm.predict(ppr)
predictProba = gmm.predict_proba(ppr)
weights = gmm.weights_
genotypeQuality = []
for p,i in enumerate(predict):
pd = predictProba[p][i]
if pd > PRECISION: pd = PRECISION
genotypeQuality.append(int(-10 * np.log10(1.0 - pd) + 0.5))
logprob, posteriors = gmm.score_samples( ppr )
for i in range(len(posteriors)):
posteriors[i][posteriors[i] == 0.0] = 1.0 - PRECISION
loglhd = logprob[:, np.newaxis] + np.log( posteriors ) - np.log(weights) # The Log(e) genotype likelihoods
lhd = np.exp(loglhd)
for i in range(len(lhd)): lhd[i][lhd[i] == 0.0] = 1.0 - PRECISION
loglhd = np.log10(lhd) # change the radices
# Add info to the format fields for each individual
fmat = {t:i for i,t in enumerate(data[8].split(':'))}
if 'GQ' not in fmat : data[8] += ':GQ'
if 'PL' not in fmat : data[8] += ':PL'
first, gnt, ac, iac, N = True, [], 0, 0, 0
refCount, hetCount, homCount = 1, 1, 1 # Assign 1 to prevent 0 in denominator
for i in range(len(data[9:])):
if data[9+i] == './.':
for j in range(len(fmat) - 1): data[9+i] += ':.'
data[9+i] += ':0:65535,65535,65535'
gnt.append('./.')
continue
fi = data[9+i].split(':')
# Change raw genotype
gt = 0
if fi[0] != './.' and fi[0] != '0/0' :
tt = [string.atoi(g) for g in fi[0].split('/')]
if tt[0] > 1:
gt = tt[0]
elif tt[1] > 1:
gt = tt[1]
# Change the genotype
if gt > 0:
tt = [string.atoi(g) for g in c2g[predict[i]].split('/')]
if tt[0] > 0: tt[0] = gt
if tt[1] > 0: tt[1] = gt
fi[0] = str(tt[0]) + '/' + str(tt[1])
else:
fi[0] = c2g[predict[i]]
pl = [int(p+0.5) for p in -10 * loglhd[i]]
gnt.append(fi[0])
if fi[0] == '1/1':
if i not in child: iac += 2
ac += 2
homCount += 1
if fi[0] == '0/1':
if i not in child : iac += 1
ac += 1
hetCount += 1
if fi[0] == '0/0':
if i not in child : iac += 0
ac += 0
refCount += 1
if fi[0] != './.': N += 1
if '0/0' not in g2c:
phredScale = '65535'
if first : weight = '0'
else:
phredScale = str(pl[g2c['0/0']])
if first : weight = str(weights[g2c['0/0']])
if '0/1' not in g2c:
phredScale += ',65535'
if first : weight += ',0'
else :
phredScale += ',' + str(pl[g2c['0/1']])
if first : weight += ',' + str(weights[g2c['0/1']])
if '1/1' not in g2c :
phredScale += ',65535'
if first : weight += ',0'
else :
phredScale += ',' + str(pl[g2c['1/1']])
if first: weight += ',' + str(weights[g2c['1/1']])
first = False
if 'GQ' not in fmat:
fi.append(str(genotypeQuality[i]))
else:
fi[fmat['GQ']] = str(genotypeQuality[i])
if 'PL' not in fmat:
fi.append(phredScale)
else:
fi[fmat['PL']] = phredScale
data[9+i] = ':'.join(fi)
if N == 0: N = 1
# expected reference allele frequency
p = float(2.0 * refCount + hetCount) / (2.0 * (refCount + hetCount + homCount))
q = 1.0 - p # expected alternative allele frequency
f = 1.0 - (hetCount / (2.0 * p * q * N)) # The hetCount VS expected of hetCount
if re.search(r';InbCoeff=([^;]+)', data[7]):
data[7] = re.sub(r';InbCoeff=([^;]+)', ';InbCoeff=%.2f' % f, data[7])
else:
data[7] += ';InbCoeff=%.2f' % f
    # All samples are entirely 1/1 or 0/0!
if homCount == N + 1 or refCount == N + 1:
if data[6] == '.' or data[6] == 'PASS':
data[6] = 'FALSE_GENOTYPE'
elif 'FALSE_GENOTYPE' not in data[6]:
data[6] = 'FALSE_GENOTYPE;' + data[6]
gmm.converged_ = False
ng = set([])
if gmm.n_components > 1 and gmm.converged_:
_, _, _, ng = gmm.Mendel(gnt, sam2col, family)
if gmm.converged_:
for i,g in enumerate([c2g[c] for c in predict]): # For figure
grey.append([ppr[i]])
if gnt[i] == './.': continue
if g == '0/0':
if i in ng:
green.append([ppr[i]])
else:
green.append([ppr[i]])
if g == '0/1':
if i in ng:
red.append([ppr[i]])
else:
red.append([ppr[i]])
if g == '1/1':
if i in ng:
blue.append([ppr[i]])
else:
blue.append([ppr[i]])
return np.array(genotypeQuality), np.array(gnt), ac, iac, f
def DrawModel2(gmm, ppr) :
#fig = plt.figure()
minv = np.min([-1.5 * np.max(ppr), 1.5 * np.max(ppr)])
maxv = np.max([-1.5 * np.max(ppr), 1.5 * np.max(ppr)])
x = np.linspace( minv, maxv )
y = np.linspace( minv, maxv )
X, Y = np.meshgrid(x, y)
XX = np.c_[X.ravel(), Y.ravel()]
Z = np.log( np.abs(gmm.score_samples(XX)[0]) ) #np.log(-gmm.score_samples(XX)[0])
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z)
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(ppr[:, 0], ppr[:, 1], .8)
plt.axis('tight')
# plt.show()
def DrawModel(figureFile, gmm, ppr, pp):
predict = gmm.predict(ppr)
mu = gmm.means_
fig = plt.figure()
color1 = ['bo', 'ro', 'go']
labels = ['Hom','Het','Ref']
for i in range( len(mu) ) :
x = []
for j in range(len(predict)) :
if predict[j] == i : x.append(pp[j])
x = np.array(x)
plt.plot(x[:,0], x[:,1], color1[i], label = labels[i])
plt.legend()
plt.xlabel('Reference depth', fontsize=14)
plt.ylabel('Alternate depth', fontsize=14)
fig.savefig(figureFile + '.png')
fig.savefig(figureFile + '.pdf')
def DrawFig1 (figureFile, title, red, green, blue, grey) :
numbins = 50
fig = plt.figure(1)
plt.title(title, fontsize=12)
if len( green ) > 0 :
n, bins, patches = plt.hist(green[:,0], numbins, normed=1, facecolor='green' , alpha= .8 )
if len( blue ) > 0 :
n, bins, patches = plt.hist(blue[:,0] , numbins, normed=1, facecolor='blue' , alpha= .8 )
if len( red ) > 0 :
n, bins, patches = plt.hist(red[:,0] , numbins, normed=1, facecolor='red' , alpha= .8 )
plt.xlim( 0, 1.3)
plt.ylabel('Normed Number', fontsize=16)
plt.xlabel('PEP-Ratio VS Reference', fontsize=16)
fig.savefig(figureFile + '.png')
fig.savefig(figureFile + '.pdf')
def DrawAC ( figPrefix, title, green, blue, red, power, alleleCount, ialleleCount, inbCoff, svsize, numbins = 60 ) :
highNum = 3
widNum = 2
fig = plt.figure(num=None, figsize=(4 * widNum, 3 * highNum), facecolor='w', edgecolor='k')
if len( alleleCount ) > 0 :
plt.subplot(321)
plt.title('AC Number', fontsize=12)
plt.hist(alleleCount , numbins, histtype='bar', normed=1, facecolor = 'c', color=['c'] )
plt.ylabel('#', fontsize=12)
if len ( ialleleCount ) > 0 :
plt.subplot(322)
plt.title('IAC Number', fontsize=12)
plt.hist(ialleleCount , numbins, histtype='bar', normed=1, facecolor = 'c', color=['c'] )
plt.ylabel('#', fontsize=12)
if len( inbCoff ) > 0 :
plt.subplot(323)
plt.hist(inbCoff, 20 ,normed=1, facecolor = 'c', color=['c'] )
plt.xlabel('1.0-hetCount/Expected_hetCount',fontsize=12)
plt.ylabel('#', fontsize=14)
plt.subplot(324)
plt.title(title, fontsize=12)
if len( green ) > 0:
plt.hist(green[:,0], numbins, normed=1, facecolor='green' , color=['g'], label=['Het'], alpha= .8)
if len( blue ) > 0:
plt.hist(blue[:,0], numbins, normed=1, facecolor='blue' , color=['b'], label=['Hom'], alpha= .8)
if len( red ) > 0:
plt.hist(red[:,0], 30, normed=1, facecolor='red' , color=['r'], label=['Ref'], alpha= .8)
plt.legend()
plt.xlim( 0, 2.0)
plt.ylabel('Normed Number', fontsize=12)
    plt.xlabel('PEP Ratio', fontsize=12)
if len ( power ) > 0 :
plt.subplot(313)
plt.bar( np.arange(len(power)), power[:,1], color = 'c')
plt.xticks( np.arange(len(power)) + 0.5, power[:,0], rotation = 90)
plt.ylim(0.0, 1.2)
plt.xlabel('SV SIZE(bp)', fontsize=12)
plt.ylabel('Proper of Genotype', fontsize=12)
fig.savefig(figPrefix + '.png')
fig.savefig(figPrefix + '.pdf')
#plt.show()
def DrawFig(figureFile, alleleCount, red, green, blue):
numbins = len(set(alleleCount))
fig = plt.figure(num=None, facecolor='w', edgecolor='k')
plt.subplot(211)
if len( alleleCount ) > 0 :
plt.subplot(321)
plt.title('AC Number', fontsize=14)
plt.hist(alleleCount , numbins, histtype='bar', normed=1, facecolor = 'c', color=['c'] )
plt.ylabel('Normed Number', fontsize=14)
plt.subplot(212)
numbins = 60
plt.title('Data Distribution', fontsize=14)
if len(green) > 0:
plt.hist(green[:,0], numbins, histtype='bar', normed=1, facecolor='green', color=['g'], label=['Ref'], alpha= .8 )
if len(blue) > 0:
plt.hist(blue[:,0] , numbins, histtype='bar', normed=1, facecolor='blue' , color=['b'], label=['Hom'], alpha= .8 )
if len(red) > 0:
plt.hist(red[:,0] , numbins, histtype='bar', normed=1, facecolor='red' , color=['r'], label=['Het'], alpha= .8 )
plt.legend()
plt.ylabel('Normed Number', fontsize=14)
    fig.savefig(figureFile + '.png')
    fig.savefig(figureFile + '.pdf')
##################
def LoadFamily(file):
if len(file) == 0: return {}
family = {}
for line in open(file):
#1006 1006-05 1006-01 1006-02 0 0
        line = line.strip('\n') # Cut the return char at the end.
col = line.split()
if col[1] in family.keys():
print >> sys.stderr, 'The key is already in family! Your file may have the duplication sample name : ', col[1], '\n'
sys.exit(1)
family[col[1]] = [col[2], col[3]]
return family
############################# Main Process #############################
def main(opt):
gqSummary = {1: 0.0, 2: 0.0, 3: 0.0, 10: 0.0, 20: 0.0, 30: 0.0, 'sum': 0.0, 'Yes': 0.0, 'No': 0.0}
family = LoadFamily(opt.family)
outPrefix = opt.outPrefix
for f in infile:
outHandle = open(outPrefix + '.vcf', 'w')
outFailGtyHandle = open(outPrefix + '.false_genotype.vcf', 'w')
print >> sys.stderr, '# *** Reading File: ', f, ' ***\n'
if f[-3:] == '.gz':
if len(opt.chroms) > 0:
gzformat, I = True, os.popen("tabix -h %s %s " % (f, ' '.join(opt.chroms)))
else:
gzformat, I = True, os.popen("gzip -dc %s " % f)
else:
gzformat, I = False, open(f)
        # 'grey', 'red', 'green' and 'blue' are used for drawing the figures
grey, red, green, blue, inbCoff, svsize = [], [], [], [], [], []
power, alleleCount, ialleleCount = {}, [], []
sam2col = {}
mdr, mde, mdr_t, mde_t, mde_n = 0.0, 0.0, 0.0, 0.0, 0
while 1:
            # Read 100000 lines at a time for efficiency
lines = I.readlines(100000)
if not lines : break
for line in lines:
                line = line.strip('\n') # Cut the return char at the end.
col = line.split()
if re.search(r'^##FORMAT=<ID=GT', line):
outHandle.write(line + '\n')
outFailGtyHandle.write(line + '\n')
outHandle.write('##FORMAT=<ID=GQ,Number=1,Type=Integer,Description="Genotype Quality. -10*log10(1-p), p=w*N/(sigma(W*N)) N is gaussian density (p: The posterior probability of Genotype call is correct)">\n')
outFailGtyHandle.write('##FORMAT=<ID=GQ,Number=1,Type=Integer,Description="Genotype Quality. -10*log10(1-p), p=w*N/(sigma(W*N)) N is gaussian density (p: The posterior probability of Genotype call is correct)">\n')
outHandle.write('##FORMAT=<ID=PL,Number=1,Type=String,Description="Phred-scaled genotype likelihood. Rounded to the closest integer as defined in the VCF specification (The lower the better). The value calculate -10*log10(p), p is the predict posterior probability. And the order is : HOM_REF,HETE_VAR,HOM_VAR">\n')
outFailGtyHandle.write('##FORMAT=<ID=PL,Number=1,Type=String,Description="Phred-scaled genotype likelihood. Rounded to the closest integer as defined in the VCF specification (The lower the better). The value calculate -10*log10(p), p is the predict posterior probability. And the order is : HOM_REF,HETE_VAR,HOM_VAR">\n')
continue
elif re.search(r'^##INFO=<ID=AC', line):
outHandle.write(line + '\n')
outFailGtyHandle.write(line + '\n')
outHandle.write('##FILTER=<ID=FALSE_GENOTYPE,Description="False in genotype process">\n')
outFailGtyHandle.write('##FILTER=<ID=FALSE_GENOTYPE,Description="False in genotype process">\n')
outHandle.write('##INFO=<ID=InbCoeff,Number=1,Type=Float,Description="Inbreeding coefficient: 1.0 - hetCount/Expected_hetCount">\n')
outFailGtyHandle.write('##INFO=<ID=InbCoeff,Number=1,Type=Float,Description="Inbreeding coefficient: 1.0 - hetCount/Expected_hetCount">\n')
elif re.search(r'^##', line):
outHandle.write(line + '\n')
outFailGtyHandle.write(line + '\n')
continue
elif re.search(r'^#', line):
outHandle.write(line + '\n')
outFailGtyHandle.write(line + '\n')
sam2col = {sam:i for i,sam in enumerate(col[9:])}
continue
if (len(opt.chroms)>0) and (col[0] not in opt.chroms): continue
fmat = {t:i for i,t in enumerate(col[8].split(':'))}
if 'AA' not in fmat: continue
                if col[6] == 'PASS': col[6] = '.'
trainIndx, ppr, pp = GetPPrate(fmat, col[9:])
sample_size = len(col[9:])
if len(trainIndx) < 10:
print >> sys.stderr, "[WARNING] Your effective sample size is less than 10. The Genotype quality may be low!\n"
if (sample_size < 10 and len(trainIndx) < sample_size):
gqSummary['No'] += 1.0
if col[6] == '.' or col[6] == 'PASS':
col[6] = 'FALSE_GENOTYPE'
elif 'FALSE_GENOTYPE' not in col[6]:
col[6] = 'FALSE_GENOTYPE;' + col[6]
if 'GQ' not in fmat: col[8] += ':GQ'
if 'PL' not in fmat: col[8] += ':PL'
for i in range(len(col[9:])):
if col[9+i] == './.':
for j in range(len(fmat) - 1): col[9+i] += ':.'
col[9+i] += ':0:65535,65535,65535'
else:
fi = col[9+i].split(':')
fi[0] = './.'
if 'GQ' not in fmat:
fi.append('0')
else:
fi[fmat['GQ']] = '0'
if 'PL' not in fmat:
fi.append('65535,65535,65535')
else:
fi[fmat['PL']] = '65535,65535,65535'
col[9+i] = ':'.join(fi)
outFailGtyHandle.write('\t'.join(col) + '\n')
continue
nc = 3
clf = mixture.GMM(n_components=nc, n_iter=50, n_init=8, covariance_type='full', thresh=0.001, params='wmc')
clf.fit(ppr[trainIndx])
if not clf.converged_:
print >> sys.stderr, '#+++ Position:', col[0], col[1], "couldn't converge with 3 components in GMM. Now trying 2 components ... "
nc = 2
clf = mixture.GMM(n_components=nc, n_iter=50, n_init=8, covariance_type='full', thresh=0.001, params='wmc')
clf.fit(ppr[trainIndx])
if not clf.converged_:
                        print >> sys.stderr, '#+++ Position:', col[0], col[1], "couldn't converge with 2 components in GMM. Now trying 1 component ... "
nc = 1
clf = mixture.GMM(n_components=nc, n_iter=50, n_init=8, covariance_type='full', thresh=0.001, params='wmc')
clf.fit(ppr[trainIndx])
print >> sys.stderr, '#--> Position:', col[0], col[1], 'with', clf.n_components,'components. Converge information :', clf.converged_
print >> sys.stderr, '# Means: \n', clf.means_, '\nCovars: \n', clf.covars_,'\nWeight', clf.weights_, '\n*************'
genotypeQuality,gnt,ac,iac,ef = UpdateInfoFromGMM(clf, ppr, grey, red, green, blue, col, sam2col, family)
inbCoff.append(ef)
if ef > -0.7 and clf.converged_:
alleleCount.append(ac)
ialleleCount.append(iac)
else:
if col[6] == '.' or col[6] == 'PASS':
col[6] = 'FALSE_GENOTYPE'
elif 'FALSE_GENOTYPE' not in col[6]:
col[6] = 'FALSE_GENOTYPE;' + col[6]
clf.converged_ = False
fmat = {t:i for i,t in enumerate(col[8].split(':'))}
for i in range(len(col[9:])):
fi = col[9+i].split(':')
if fi[0] != './.':
rr = string.atof(fi[fmat['AA']].split(',')[0])
aa = string.atof(fi[fmat['AA']].split(',')[1])
else:
rr, aa = 0, 0
# still keep the genotype info in 'FALSE_GENOTYPE'
# if rr + aa == 0 or 'FALSE_GENOTYPE' in col[6]:
if rr + aa == 0:
fi[fmat['GQ']] = '0'
fi[fmat['GT']] = './.'
fi[fmat['PL']] = '.'
col[9+i] = ':'.join(fi)
gnt[i] = fi[fmat['GT']]
genotypeQuality[i] = 0
if 'FALSE_GENOTYPE' in col[6]: gnt[i] = './.'
if clf.converged_ and len(family) > 0:
sm, sn, snum, _ = clf.Mendel(gnt, sam2col, family)
mdr_t += sm
mde_t += sn
mde_n += snum
if 'FALSE_GENOTYPE' in col[6]:
outFailGtyHandle.write('\t'.join(col) + '\n')
else:
outHandle.write('\t'.join(col) + '\n')
if clf.converged_:
gqSummary['Yes'] += 1.0
gqSummary['sum'] += len(ppr)
gqSummary[10] += len(genotypeQuality[genotypeQuality>=10])
gqSummary[20] += len(genotypeQuality[genotypeQuality>=20])
gqSummary[30] += len(genotypeQuality[genotypeQuality>=30])
gqSummary[clf.n_components] += 1.0
else:
gqSummary['No'] += 1.0
I.close()
outHandle.close()
outFailGtyHandle.close()
if gqSummary['sum'] == 0: gqSummary['sum'] = 1.0
if gqSummary['Yes'] + gqSummary['No'] == 0:
gqSummary['Yes'] = 1.0
gqSummary['No'] = 1e9
if gqSummary['Yes'] == 0:
gqSummary['Yes'] = 1.0
mderr_t = '-'
if mdr_t + mde_t > 0.0:
mderr_t = mde_t/(mdr_t+mde_t)
print >> sys.stderr, '\n******** Output Summary information ************************************\n'
print >> sys.stderr, '# ** The count of positions which can be genotype:',int(gqSummary['Yes']),',',gqSummary['Yes']/(gqSummary['Yes']+gqSummary['No'])
print >> sys.stderr, '# ** (Just for the genotype positions)The mendelian violation of', f, 'is : ',mderr_t, '\t', mde_n
print >> sys.stderr, '# ** (Just for the genotype positions)Proportion of 1 component : ', gqSummary[1] / gqSummary['Yes']
print >> sys.stderr, '# ** (Just for the genotype positions)Proportion of 2 component : ', gqSummary[2] / gqSummary['Yes']
print >> sys.stderr, '# ** (Just for the genotype positions)Proportion of 3 component : ', gqSummary[3] / gqSummary['Yes']
print >> sys.stderr, '# ** (Just for the genotype positions)The ratio of Homo-Ref(0/0): ', len(green) / gqSummary['sum']
print >> sys.stderr, '# ** (Just for the genotype positions)The ratio of Hete-Var(0/1): ', len(red) / gqSummary['sum']
print >> sys.stderr, '# ** (Just for the genotype positions)The ratio of Homo-Var(1/1): ', len(blue) / gqSummary['sum']
print >> sys.stderr, '# ** (Just for the genotype positions)Genotype Quality >= 10 : ', gqSummary[10] / gqSummary['sum']
print >> sys.stderr, '# ** (Just for the genotype positions)Genotype Quality >= 20 : ', gqSummary[20] / gqSummary['sum']
print >> sys.stderr, '# ** (Just for the genotype positions)Genotype Quality >= 30 : ', gqSummary[30] / gqSummary['sum']
DrawFig(figPrefix, np.array(alleleCount), np.array(red), np.array(green), np.array(blue))
if __name__ == '__main__':
usage = "Usage : %prog [option] [vcfInfile] > Output"
optp = optparse.OptionParser(usage=usage)
optp.add_option("-c", "--chr", dest="chroms", metavar="CHR",
help="process only specified chromosomes, separated by ','. "
"[default: all]\nexample: --chroms=chr1,chr2", default=[])
optp.add_option("-p", "--ped", dest="family", metavar="PED", help="Family information. ", default=[])
optp.add_option("-f", "--fig", dest="figure", metavar="FIG", help="The prefix of figure about the GMM.", default=[])
optp.add_option("-o", "--out", dest="outPrefix", metavar="OUT", help="The prefix of output. [out]", default = 'out')
opt, infile = optp.parse_args()
figPrefix = 'test'
if len(infile) == 0:
optp.error("Required at least one [vcfInfile]\n")
if len(opt.figure) > 0:
figPrefix = opt.figure
if any(opt.chroms):
opt.chroms = opt.chroms.split(',')
    COMPONENT_NUM = 3 # The number of genotype classes
PRECISION = 0.9999999999
main(opt)
    print >> sys.stderr, '\n# [INFO] Closing the output files:\n -- (1/4) %s' % (opt.outPrefix + '.vcf')
print >> sys.stderr, ' -- (2/4) %s' % (opt.outPrefix + '.false_genotype.vcf')
print >> sys.stderr, ' -- (3/4) %s' % (opt.figure + '.pdf')
print >> sys.stderr, ' -- (4/4) %s' % (opt.figure + '.png')
print >> sys.stderr, '******************************* ALL DONE *******************************'
|
mit
|
imperial-genomics-facility/data-management-python
|
test/utils/projectutils_test.py
|
1
|
16333
|
import os, unittest
import pandas as pd
from sqlalchemy import create_engine
from igf_data.igfdb.igfTables import Base,Project,User,ProjectUser,Sample,Experiment,Run,Collection,Collection_group,File
from igf_data.igfdb.baseadaptor import BaseAdaptor
from igf_data.igfdb.projectadaptor import ProjectAdaptor
from igf_data.igfdb.useradaptor import UserAdaptor
from igf_data.igfdb.sampleadaptor import SampleAdaptor
from igf_data.igfdb.experimentadaptor import ExperimentAdaptor
from igf_data.igfdb.runadaptor import RunAdaptor
from igf_data.igfdb.collectionadaptor import CollectionAdaptor
from igf_data.igfdb.fileadaptor import FileAdaptor
from igf_data.igfdb.platformadaptor import PlatformAdaptor
from igf_data.igfdb.seqrunadaptor import SeqrunAdaptor
from igf_data.utils.dbutils import read_dbconf_json
from igf_data.utils.fileutils import get_temp_dir,remove_dir
from igf_data.utils.projectutils import get_files_and_irods_path_for_project,mark_project_as_withdrawn
from igf_data.utils.projectutils import get_project_read_count,mark_project_barcode_check_off,get_seqrun_info_for_project,mark_project_and_list_files_for_cleanup
class Projectutils_test1(unittest.TestCase):
def setUp(self):
self.dbconfig = 'data/dbconfig.json'
dbparam = read_dbconf_json(self.dbconfig)
data = [
{'project_igf_id': 'IGFP001_test1_24-1-18',},
{'project_igf_id': 'IGFP002_test1_24-1-18',
'barcode_check':'ON'},
{'project_igf_id': 'IGFP003_test1_24-1-18',
'barcode_check':'OFF'}]
self.data = pd.DataFrame(data)
base = BaseAdaptor(**dbparam)
self.engine = base.engine
self.dbname = dbparam['dbname']
Base.metadata.create_all(self.engine)
self.session_class = base.get_session_class()
def tearDown(self):
Base.metadata.drop_all(self.engine)
os.remove(self.dbname)
def test_mark_project_barcode_check_off(self):
pr = ProjectAdaptor(**{'session_class':self.session_class})
pr.start_session()
pr.store_project_and_attribute_data(self.data)
pr.close_session()
mark_project_barcode_check_off(
project_igf_id='IGFP001_test1_24-1-18',
session_class=self.session_class) # no attribute record
pr.start_session()
attribute_check = \
pr.check_project_attributes(
project_igf_id='IGFP001_test1_24-1-18',
attribute_name='barcode_check')
self.assertTrue(attribute_check)
pr_attributes = \
pr.get_project_attributes(
project_igf_id='IGFP001_test1_24-1-18',
attribute_name='barcode_check')
for pr_attribute in pr_attributes.to_dict(orient='records'):
self.assertEqual(pr_attribute['attribute_value'],'OFF')
pr_attributes = \
pr.get_project_attributes(
project_igf_id='IGFP002_test1_24-1-18',
attribute_name='barcode_check')
for pr_attribute in pr_attributes.to_dict(orient='records'):
self.assertEqual(pr_attribute['attribute_value'],'ON')
pr.close_session()
mark_project_barcode_check_off(
project_igf_id='IGFP002_test1_24-1-18',
session_class=self.session_class) # barcode check ON
pr.start_session()
pr_attributes = \
pr.get_project_attributes(
project_igf_id='IGFP002_test1_24-1-18',
attribute_name='barcode_check')
for pr_attribute in pr_attributes.to_dict(orient='records'):
self.assertEqual(pr_attribute['attribute_value'],'OFF')
pr_attributes = \
pr.get_project_attributes(
project_igf_id='IGFP003_test1_24-1-18',
attribute_name='barcode_check')
for pr_attribute in pr_attributes.to_dict(orient='records'):
self.assertEqual(pr_attribute['attribute_value'],'OFF')
pr.close_session()
mark_project_barcode_check_off(
project_igf_id='IGFP003_test1_24-1-18',
session_class=self.session_class) # barcode check OFF
pr.start_session()
pr_attributes = \
pr.get_project_attributes(
project_igf_id='IGFP003_test1_24-1-18',
attribute_name='barcode_check')
for pr_attribute in pr_attributes.to_dict(orient='records'):
self.assertEqual(pr_attribute['attribute_value'],'OFF')
pr.close_session()
class Projectutils_test2(unittest.TestCase):
def setUp(self):
self.dbconfig = 'data/travis_dbconf.json'
dbparam = read_dbconf_json(self.dbconfig)
base = BaseAdaptor(**dbparam)
self.engine = base.engine
self.dbname = dbparam['dbname']
Base.metadata.create_all(self.engine)
self.session_class = base.get_session_class()
platform_data = [{
"platform_igf_id" : "M001",
"model_name" : "MISEQ" ,
"vendor_name" : "ILLUMINA" ,
"software_name" : "RTA",
"software_version" : "RTA1.18.54"}]
flowcell_rule_data = [{
"platform_igf_id":"M001",
"flowcell_type":"MISEQ",
"index_1":"NO_CHANGE",
"index_2":"NO_CHANGE"}]
seqrun_data = [
{'seqrun_igf_id':'SeqrunA',
'flowcell_id':'000000000-D0YLK',
'platform_igf_id':'M001',
'flowcell':'MISEQ'},
{'seqrun_igf_id':'SeqrunB',
'flowcell_id':'000000000-D0YLL',
'platform_igf_id':'M001',
'flowcell':'MISEQ'}]
project_data = [
{'project_igf_id':'ProjectA'},
{'project_igf_id':'ProjectB'}]
user_data = [
{'name':'UserA',
'email_id':'[email protected]',
'username':'usera'},
{'name':'UserB',
'email_id':'[email protected]',
'username':'userb'}]
project_user_data = [
{'project_igf_id': 'ProjectA',
'email_id': '[email protected]',
'data_authority':True},
{'project_igf_id': 'ProjectA',
'email_id': '[email protected]'},
{'project_igf_id': 'ProjectB',
'email_id': '[email protected]',
'data_authority':True},
{'project_igf_id': 'ProjectB',
'email_id': '[email protected]'}]
sample_data = [
{'sample_igf_id':'SampleA',
'project_igf_id':'ProjectA'},
{'sample_igf_id':'SampleB',
'project_igf_id':'ProjectB'}]
experiment_data = [
{'experiment_igf_id':'ExperimentA',
'sample_igf_id':'SampleA',
'library_name':'SampleA',
'platform_name':'MISEQ',
'project_igf_id':'ProjectA'},
{'experiment_igf_id':'ExperimentB',
'sample_igf_id':'SampleB',
'library_name':'SampleB',
'platform_name':'MISEQ',
'project_igf_id':'ProjectB'}]
run_data = [
{'run_igf_id':'RunA_A',
'experiment_igf_id':'ExperimentA',
'seqrun_igf_id':'SeqrunA',
'lane_number':'1'},
{'run_igf_id':'RunA_B',
'experiment_igf_id':'ExperimentA',
'seqrun_igf_id':'SeqrunB',
'lane_number':'1'}]
file_data = [
{'file_path':'/path/RunA_A_R1.fastq.gz',
'location':'HPC_PROJECT',
'md5':'fd5a95c18ebb7145645e95ce08d729e4',
'size':'1528121404'},
{'file_path':'/path/ExperimentA.cram',
'location':'HPC_PROJECT',
'md5':'fd5a95c18ebb7145645e95ce08d729e4',
'size':'1528121404'},
{'file_path':'/path/ExperimentB.cram',
'location':'HPC_PROJECT',
'md5':'fd5a95c18ebb7145645e95ce08d729e3',
'size':'1528121404'}]
collection_data = [
{'name':'RunA_A',
'type':'demultiplexed_fastq',
'table':'run'},
{'name':'ExperimentA',
'type':'analysis_cram',
'table':'experiment'},
{'name':'ExperimentB',
'type':'analysis_cram',
'table':'experiment'}]
collection_files_data = [
{'name':'RunA_A',
'type':'demultiplexed_fastq',
'file_path':'/path/RunA_A_R1.fastq.gz'},
{'name':'ExperimentA',
'type':'analysis_cram',
'file_path':'/path/ExperimentA.cram'},
{'name':'ExperimentB',
'type':'analysis_cram',
'file_path':'/path/ExperimentB.cram'}]
base.start_session()
## store platform data
pl = PlatformAdaptor(**{'session':base.session})
pl.store_platform_data(data=platform_data)
## store flowcell rules data
pl.store_flowcell_barcode_rule(data=flowcell_rule_data)
## store seqrun data
sra = SeqrunAdaptor(**{'session':base.session})
sra.store_seqrun_and_attribute_data(data=seqrun_data)
## store user data
ua=UserAdaptor(**{'session':base.session})
ua.store_user_data(data=user_data)
## store project data
pa = ProjectAdaptor(**{'session':base.session})
pa.store_project_and_attribute_data(data=project_data)
## assign users to projects
pa.assign_user_to_project(data=project_user_data)
## store sample data
sa = SampleAdaptor(**{'session':base.session})
sa.store_sample_and_attribute_data(data=sample_data)
## store experiment data
ea = ExperimentAdaptor(**{'session':base.session})
ea.store_project_and_attribute_data(data=experiment_data)
## store run data
ra = RunAdaptor(**{'session':base.session})
ra.store_run_and_attribute_data(data=run_data)
## store files to db
fa = FileAdaptor(**{'session':base.session})
fa.store_file_and_attribute_data(data=file_data)
## store collection info to db
ca = CollectionAdaptor(**{'session':base.session})
ca.store_collection_and_attribute_data(data=collection_data)
## assign files to collections
ca.create_collection_group(data=collection_files_data)
base.close_session()
def tearDown(self):
Base.metadata.drop_all(self.engine)
def test_mark_project_as_withdrawn(self):
base = BaseAdaptor(**{'session_class':self.session_class})
base.start_session()
query = \
base.session.\
query(
Sample.status.label('sample_status'),
Experiment.status.label('exp_status'),
Run.status.label('run_status'),
File.status.label('file_status')).\
join(Project,Project.project_id==Sample.project_id).\
join(Experiment,Experiment.sample_id==Sample.sample_id).\
join(Run,Run.experiment_id==Experiment.experiment_id).\
join(Collection,Collection.name==Run.run_igf_id).\
join(Collection_group,Collection.collection_id==Collection_group.collection_id).\
join(File,File.file_id==Collection_group.file_id).\
filter(Collection.type=='demultiplexed_fastq').\
filter(Collection.table=='run').\
filter(Project.project_igf_id=='ProjectA')
records = base.fetch_records(query=query)
self.assertEqual(records['sample_status'].values[0],'ACTIVE')
self.assertEqual(records['exp_status'].values[0],'ACTIVE')
self.assertEqual(records['run_status'].values[0],'ACTIVE')
self.assertEqual(records['file_status'].values[0],'ACTIVE')
query = \
base.session.\
query(
Sample.status.label('sample_status'),
Experiment.status.label('exp_status'),
File.status.label('file_status')).\
join(Project,Project.project_id==Sample.project_id).\
join(Experiment,Experiment.sample_id==Sample.sample_id).\
join(Collection,Collection.name==Experiment.experiment_igf_id).\
join(Collection_group,Collection.collection_id==Collection_group.collection_id).\
join(File,File.file_id==Collection_group.file_id).\
filter(Collection.type=='analysis_cram').\
filter(Collection.table=='experiment').\
filter(Project.project_igf_id=='ProjectB')
records = base.fetch_records(query=query)
self.assertEqual(records['sample_status'].values[0],'ACTIVE')
self.assertEqual(records['exp_status'].values[0],'ACTIVE')
self.assertEqual(records['file_status'].values[0],'ACTIVE')
base.close_session()
mark_project_as_withdrawn(
project_igf_id='ProjectA',
db_session_class=self.session_class,
withdrawn_tag='WITHDRAWN')
base.start_session()
query = \
base.session.\
query(
Sample.status.label('sample_status'),
Experiment.status.label('exp_status'),
Run.status.label('run_status'),
File.status.label('file_status')).\
join(Project,Project.project_id==Sample.project_id).\
join(Experiment,Experiment.sample_id==Sample.sample_id).\
join(Run,Run.experiment_id==Experiment.experiment_id).\
join(Collection,Collection.name==Run.run_igf_id).\
join(Collection_group,Collection.collection_id==Collection_group.collection_id).\
join(File,File.file_id==Collection_group.file_id).\
filter(Collection.type=='demultiplexed_fastq').\
filter(Collection.table=='run').\
filter(Project.project_igf_id=='ProjectA')
records = base.fetch_records(query=query)
self.assertEqual(records['sample_status'].values[0],'WITHDRAWN')
self.assertEqual(records['exp_status'].values[0],'WITHDRAWN')
self.assertEqual(records['run_status'].values[0],'WITHDRAWN')
self.assertEqual(records['file_status'].values[0],'WITHDRAWN')
query = \
base.session.\
query(
Sample.status.label('sample_status'),
Experiment.status.label('exp_status'),
File.status.label('file_status')).\
join(Project,Project.project_id==Sample.project_id).\
join(Experiment,Experiment.sample_id==Sample.sample_id).\
join(Collection,Collection.name==Experiment.experiment_igf_id).\
join(Collection_group,Collection.collection_id==Collection_group.collection_id).\
join(File,File.file_id==Collection_group.file_id).\
filter(Collection.type=='analysis_cram').\
filter(Collection.table=='experiment').\
filter(Project.project_igf_id=='ProjectB')
records = base.fetch_records(query=query)
self.assertEqual(records['sample_status'].values[0],'ACTIVE')
self.assertEqual(records['exp_status'].values[0],'ACTIVE')
self.assertEqual(records['file_status'].values[0],'ACTIVE')
base.close_session()
def test_get_files_and_irods_path_for_project(self):
base = BaseAdaptor(**{'session_class':self.session_class})
file_list, irods_dir = \
get_files_and_irods_path_for_project(
project_igf_id='ProjectA',
db_session_class=self.session_class,
irods_path_prefix='/igfZone/home/')
base.start_session()
query = \
base.session.\
query(File.file_path).\
join(Collection_group,File.file_id==Collection_group.file_id).\
join(Collection,Collection.collection_id==Collection_group.collection_id).\
join(Experiment,Experiment.experiment_igf_id==Collection.name).\
join(Sample,Sample.sample_id==Experiment.sample_id).\
join(Project,Project.project_id==Sample.project_id).\
filter(Collection.table=='experiment').\
filter(Project.project_igf_id=='ProjectA')
exp_file_list = base.fetch_records(query=query)
exp_file_list = exp_file_list['file_path'].values
self.assertTrue(exp_file_list[0] in file_list)
query = \
base.session.\
query(File.file_path).\
join(Collection_group,File.file_id==Collection_group.file_id).\
join(Collection,Collection.collection_id==Collection_group.collection_id).\
join(Experiment,Experiment.experiment_igf_id==Collection.name).\
join(Sample,Sample.sample_id==Experiment.sample_id).\
join(Project,Project.project_id==Sample.project_id).\
filter(Collection.table=='experiment').\
filter(Project.project_igf_id=='ProjectB')
exp_file_list = base.fetch_records(query=query)
exp_file_list = exp_file_list['file_path'].values
base.close_session()
self.assertTrue(exp_file_list[0] not in file_list)
self.assertEqual(irods_dir, '/igfZone/home/usera/ProjectA')
def test_mark_project_and_list_files_for_cleanup(self):
work_dir = get_temp_dir()
mark_project_and_list_files_for_cleanup(
project_igf_id='ProjectA',
dbconfig_file=self.dbconfig,
outout_dir=work_dir,
force_overwrite=True,
use_ephemeral_space=False,
irods_path_prefix='/igfZone/home/',
withdrawn_tag='WITHDRAWN')
file_list_path = os.path.join(work_dir,'ProjectA_all_files.txt')
irods_file_path = os.path.join(work_dir,'ProjectA_irods_files.txt')
self.assertTrue(os.path.exists(file_list_path))
self.assertTrue(os.path.exists(irods_file_path))
remove_dir(work_dir)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
maxiee/MyCodes
|
MachineLearningInAction/Chapter1/002_knn.py
|
1
|
3244
|
__author__ = 'Maxiee'
# -*- coding: UTF-8 -*-
from numpy import *
import operator
import matplotlib.pyplot as plt
def createDataSet():
group = array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])
labels = ['A','A','B','B']
return group, labels
def classify0(inX, dataSet, labels, k):
    '''
    :param inX: the new, unlabelled sample to classify
    :param dataSet: the known data set (one sample per row)
    :param labels: the known class labels, one per row of dataSet
    :param k: the number of nearest neighbours used for voting
    :return: the predicted class label
    '''
dataSetSize = dataSet.shape[0]
    # compute distances
    # tile repeats inX so it can be subtracted from every row of dataSet
    diffMat = tile(inX, (dataSetSize,1)) - dataSet # per-feature differences
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis = 1)
    distances = sqDistances ** 0.5 # distance from the unknown sample to each known sample
    sortedDistIndicies = distances.argsort() # sort by distance
classCount = {}
    # pick the k nearest points
    for i in range(k):
        # count the votes for each class label
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
sortedClassCount = sorted(classCount.iteritems(),
key = operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
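# Quick check with the toy data above (illustrative; run manually):
#   group, labels = createDataSet()
#   print classify0([0.5, 0.5], group, labels, 3)   # the 3 nearest neighbours vote 'B'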
def file2matrix(filename):
fr = open(filename)
arrayOlines=fr.readlines()
numberOfLines = len(arrayOlines)
    returnMat = zeros((numberOfLines,3)) # the matrix keeps only the 3 feature columns
    classLabelVector = []
index = 0
for line in arrayOlines:
        line = line.strip() # strip the trailing newline
        listFromLine = line.split('\t') # split the line into a list of fields
        returnMat[index,:]=listFromLine[0:3] # the first three fields are features
        classLabelVector.append(int(listFromLine[-1])) # the last field is the target label
index += 1
return returnMat, classLabelVector
def autoNorm(dataSet):
minVals = dataSet.min(0)
maxVals = dataSet.max(0)
ranges = maxVals - minVals
normDataSet = zeros(shape(dataSet))
m = dataSet.shape[0]
normDataSet = dataSet - tile(minVals, (m,1))
normDataSet = normDataSet/tile(ranges, (m,1))
return normDataSet, ranges, minVals
def datingClassTest():
hoRatio = 0.10
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
normMat, ranges, minVals = autoNorm(datingDataMat)
m = normMat.shape[0]
numTestVecs = int(m*hoRatio)
errorCount=0.0
for i in range(numTestVecs):
classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],\
datingLabels[numTestVecs:m],3)
print "the classifier came back with: %d, the real answer is: %d"\
% (classifierResult, datingLabels[i])
if(classifierResult != datingLabels[i]): errorCount += 1.0
print "the total error rate is: %f" % (errorCount/float(numTestVecs))
if __name__ == "__main__":
    # TODO BUG: the wrong data set is passed here; the test data set should be used
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
normMat, ranges, minVals = autoNorm(datingDataMat)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(normMat[:,1], normMat[:,2],15.0*array(datingLabels),15.0*array(datingLabels))
plt.show()
datingClassTest()
|
gpl-3.0
|
almarklein/scikit-image
|
doc/examples/plot_glcm.py
|
2
|
3277
|
"""
=====================
GLCM Texture Features
=====================
This example illustrates texture classification using grey level
co-occurrence matrices (GLCMs).
A GLCM is a histogram of co-occurring greyscale values at a given
offset over an image.
In this example, samples of two different textures are extracted from
an image: grassy areas and sky areas. For each patch, a GLCM with
a horizontal offset of 5 is computed. Next, two features of the
GLCM matrices are computed: dissimilarity and correlation. These are
plotted to illustrate that the classes form clusters in feature space.
In a typical classification problem, the final step (not included in
this example) would be to train a classifier, such as logistic
regression, to label image patches from new images.
"""
import matplotlib.pyplot as plt
from skimage.feature import greycomatrix, greycoprops
from skimage import data
PATCH_SIZE = 21
# open the camera image
image = data.camera()
# select some patches from grassy areas of the image
grass_locations = [(474, 291), (440, 433), (466, 18), (462, 236)]
grass_patches = []
for loc in grass_locations:
grass_patches.append(image[loc[0]:loc[0] + PATCH_SIZE,
loc[1]:loc[1] + PATCH_SIZE])
# select some patches from sky areas of the image
sky_locations = [(54, 48), (21, 233), (90, 380), (195, 330)]
sky_patches = []
for loc in sky_locations:
sky_patches.append(image[loc[0]:loc[0] + PATCH_SIZE,
loc[1]:loc[1] + PATCH_SIZE])
# compute some GLCM properties for each patch
xs = []
ys = []
for i, patch in enumerate(grass_patches + sky_patches):
glcm = greycomatrix(patch, [5], [0], 256, symmetric=True, normed=True)
xs.append(greycoprops(glcm, 'dissimilarity')[0, 0])
ys.append(greycoprops(glcm, 'correlation')[0, 0])
# create the figure
plt.figure(figsize=(8, 8))
# display the image patches
for i, patch in enumerate(grass_patches):
plt.subplot(3, len(grass_patches), len(grass_patches) * 1 + i + 1)
plt.imshow(patch, cmap=plt.cm.gray, interpolation='nearest',
vmin=0, vmax=255)
plt.xlabel('Grass %d' % (i + 1))
for i, patch in enumerate(sky_patches):
plt.subplot(3, len(grass_patches), len(grass_patches) * 2 + i + 1)
plt.imshow(patch, cmap=plt.cm.gray, interpolation='nearest',
vmin=0, vmax=255)
plt.xlabel('Sky %d' % (i + 1))
# display original image with locations of patches
plt.subplot(3, 2, 1)
plt.imshow(image, cmap=plt.cm.gray, interpolation='nearest',
vmin=0, vmax=255)
for (y, x) in grass_locations:
plt.plot(x + PATCH_SIZE / 2, y + PATCH_SIZE / 2, 'gs')
for (y, x) in sky_locations:
plt.plot(x + PATCH_SIZE / 2, y + PATCH_SIZE / 2, 'bs')
plt.xlabel('Original Image')
plt.xticks([])
plt.yticks([])
plt.axis('image')
# for each patch, plot (dissimilarity, correlation)
plt.subplot(3, 2, 2)
plt.plot(xs[:len(grass_patches)], ys[:len(grass_patches)], 'go',
label='Grass')
plt.plot(xs[len(grass_patches):], ys[len(grass_patches):], 'bo',
label='Sky')
plt.xlabel('GLCM Dissimilarity')
plt.ylabel('GLCM Correlation')
plt.legend()
# display the patches and plot
plt.suptitle('Grey level co-occurrence matrix features', fontsize=14)
plt.show()
|
bsd-3-clause
|
hmendozap/auto-sklearn
|
test/test_pipeline/components/feature_preprocessing/test_choice.py
|
1
|
1224
|
from __future__ import print_function
import unittest
import autosklearn.pipeline.components.feature_preprocessing as fp
class FeatureProcessingTest(unittest.TestCase):
def test_get_available_components(self):
# Target type
for target_type, num_values in [('classification', 16),
('regression', 13)]:
data_properties = {'target_type': target_type}
available_components = fp.FeaturePreprocessorChoice\
.get_available_components(data_properties)
self.assertEqual(len(available_components), num_values)
# Multiclass
data_properties = {'target_type': 'classification',
'multiclass': True}
available_components = fp.FeaturePreprocessorChoice \
.get_available_components(data_properties)
self.assertEqual(len(available_components), 16)
# Multilabel
data_properties = {'target_type': 'classification',
'multilabel': True}
available_components = fp.FeaturePreprocessorChoice \
.get_available_components(data_properties)
self.assertEqual(len(available_components), 12)
|
bsd-3-clause
|
stylianos-kampakis/scikit-learn
|
examples/svm/plot_oneclass.py
|
249
|
2302
|
"""
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
|
bsd-3-clause
|
hollerith/trading-with-python
|
lib/functions.py
|
76
|
11627
|
# -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
''' create a nan numpy array '''
a = np.empty(shape, dtype)
a.fill(np.nan)
return a
def plotCorrelationMatrix(price, thresh = None):
''' plot a correlation matrix as a heatmap image
inputs:
price: prices DataFrame
thresh: correlation threshold to use for checking, default None
'''
symbols = price.columns.tolist()
R = price.pct_change()
correlationMatrix = R.corr()
if thresh is not None:
correlationMatrix = correlationMatrix > thresh
plt.imshow(abs(correlationMatrix.values),interpolation='none')
plt.xticks(range(len(symbols)),symbols)
plt.yticks(range(len(symbols)),symbols)
plt.colorbar()
plt.title('Correlation matrix')
return correlationMatrix
def pca(A):
""" performs principal components analysis
(PCA) on the n-by-p DataFrame A
Rows of A correspond to observations, columns to variables.
Returns :
coeff : principal components, column-wise
transform: A in principal component space
latent : eigenvalues
"""
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - A.mean()).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted
idx = np.argsort(latent) # sort eigenvalues
    idx = idx[::-1] # in descending order
coeff = coeff[:,idx]
latent = latent[idx]
score = np.dot(coeff.T,A.T) # projection of the data in the new space
transform = DataFrame(index = A.index, data = score.T)
return coeff,transform,latent
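# Illustrative usage (assumes `price` is a prices DataFrame, one column per symbol):
#   coeff, transform, latent = pca(returns(price).dropna())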
def pos2pnl(price,position , ibTransactionCost=False ):
"""
calculate pnl based on price and position
Inputs:
---------
price: series or dataframe of price
position: number of shares at each time. Column names must be same as in price
ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share
Returns a portfolio DataFrame
"""
delta=position.diff()
port = DataFrame(index=price.index)
if isinstance(price,Series): # no need to sum along 1 for series
port['cash'] = (-delta*price).cumsum()
port['stock'] = (position*price)
else: # dealing with DataFrame here
port['cash'] = (-delta*price).sum(axis=1).cumsum()
port['stock'] = (position*price).sum(axis=1)
if ibTransactionCost:
tc = -0.005*position.diff().abs() # basic transaction cost
tc[(tc>-1) & (tc<0)] = -1 # everything under 1$ will be ceil'd to 1$
if isinstance(price,DataFrame):
tc = tc.sum(axis=1)
port['tc'] = tc.cumsum()
else:
port['tc'] = 0.
port['total'] = port['stock']+port['cash']+port['tc']
return port
def tradeBracket(price,entryBar,maxTradeLength,bracket):
'''
trade a symmetrical bracket on price series, return price delta and exit bar #
Input
------
price : series of price values
entryBar: entry bar number
maxTradeLength : max trade duration in bars
bracket : allowed price deviation
'''
lastBar = min(entryBar+maxTradeLength,len(price)-1)
p = price[entryBar:lastBar]-price[entryBar]
idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket
if idxOutOfBound[0].any(): # found match
priceDelta = p[idxOutOfBound[0][0]]
exitBar = idxOutOfBound[0][0]+entryBar
else: # all in bracket, exiting based on time
priceDelta = p[-1]
exitBar = lastBar
return priceDelta, exitBar
def estimateBeta(priceY,priceX,algo = 'standard'):
'''
estimate stock Y vs stock X beta using iterative linear
regression. Outliers outside 3 sigma boundary are filtered out
Parameters
--------
priceX : price series of x (usually market)
priceY : price series of y (estimate beta of this price)
Returns
--------
beta : stockY beta relative to stock X
'''
X = DataFrame({'x':priceX,'y':priceY})
if algo=='returns':
ret = (X/X.shift(1)-1).dropna().values
#print len(ret)
x = ret[:,0]
y = ret[:,1]
        # filter out extreme values (keep the 20th-80th percentile range)
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
iteration = 1
nrOutliers = 1
while iteration < 10 and nrOutliers > 0 :
(a,b) = polyfit(x,y,1)
yf = polyval([a,b],x)
#plot(x,y,'x',x,yf,'r-')
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
elif algo=='log':
x = np.log(X['x'])
y = np.log(X['y'])
(a,b) = polyfit(x,y,1)
beta = a
elif algo=='standard':
ret =np.log(X).diff().dropna()
beta = ret['x'].cov(ret['y'])/ret['x'].var()
else:
raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")
return beta
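# Illustrative usage (`aapl` and `spy` are assumed price Series on the same index):
#   beta = estimateBeta(aapl, spy, algo='standard')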
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
"""
Volatility estimation
Possible algorithms: ['YangZhang', 'CC']
"""
cc = np.log(ohlc.close/ohlc.close.shift(1))
if algo == 'YangZhang': # Yang-zhang volatility
ho = np.log(ohlc.high/ohlc.open)
lo = np.log(ohlc.low/ohlc.open)
co = np.log(ohlc.close/ohlc.open)
oc = np.log(ohlc.open/ohlc.close.shift(1))
oc_sq = oc**2
cc_sq = cc**2
rs = ho*(ho-co)+lo*(lo-co)
close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0))
open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0))
window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0))
result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)
result[:N-1] = np.nan
elif algo == 'CC': # standard close-close estimator
result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N))
else:
raise ValueError('Unknown algo type.')
return result*100
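# Editorial usage sketch (not part of the original module); the frame layout is
# assumed from the column names used above:
#     ohlc = DataFrame({'open': o, 'high': h, 'low': l, 'close': c}, index=dates)
#     volYZ = estimateVolatility(ohlc, N=20, algo='YangZhang')  # annualized, in percent
#     volCC = estimateVolatility(ohlc, N=20, algo='CC')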
def rank(current,past):
''' calculate a relative rank 0..1 for a value against series '''
return (current>past).sum()/float(past.count())
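# Editorial worked example (not part of the original module):
#     past = Series([1., 3., 2., 5.])
#     rank(4., past)   # -> 0.75, since 4 exceeds 3 of the 4 past values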
def returns(df):
return (df/df.shift(1)-1)
def logReturns(df):
t = np.log(df)
return t-t.shift(1)
def dateTimeToDate(idx):
''' convert datetime index to date '''
dates = []
for dtm in idx:
dates.append(dtm.date())
return dates
def readBiggerScreener(fName):
''' import data from Bigger Capital screener '''
with open(fName,'rb') as f:
reader = csv.reader(f)
rows = [row for row in reader]
header = rows[0]
data = [[] for i in range(len(header))]
for row in rows[1:]:
for i,elm in enumerate(row):
try:
data[i].append(float(elm))
except Exception:
data[i].append(str(elm))
return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
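# Editorial note: sharpe() above annualizes with sqrt(250) trading days and
# implicitly assumes a zero risk-free rate; pnl is expected to be a daily series.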
def drawdown(s):
"""
calculate max drawdown and duration
Input:
s, price or cumulative pnl curve $
Returns:
    drawdown : vector of drawdown values
duration : vector of drawdown duration
"""
# convert to array if got pandas series, 10x speedup
if isinstance(s,pd.Series):
idx = s.index
s = s.values
returnSeries = True
else:
returnSeries = False
if s.min() < 0: # offset if signal minimum is less than zero
s = s-s.min()
highwatermark = np.zeros(len(s))
drawdown = np.zeros(len(s))
drawdowndur = np.zeros(len(s))
for t in range(1,len(s)):
highwatermark[t] = max(highwatermark[t-1], s[t])
drawdown[t] = (highwatermark[t]-s[t])
drawdowndur[t]= (0 if drawdown[t] == 0 else drawdowndur[t-1]+1)
if returnSeries:
return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur)
else:
return drawdown , drawdowndur
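# Editorial worked example (not part of the original module):
#     equity = pd.Series([100., 105., 102., 98., 110.])
#     dd, dur = drawdown(equity)
#     # dd  -> [0, 0, 3, 7, 0]  distance below the running high-water mark
#     # dur -> [0, 0, 1, 2, 0]  bars spent in the current drawdown
#     # max drawdown is dd.max(), longest drawdown is dur.max()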
def profitRatio(pnl):
'''
    calculate profit ratio as sum(pnl)/max(drawdown)
Input: pnl - daily pnl, Series or DataFrame
'''
def processVector(pnl): # process a single column
s = pnl.fillna(0)
dd = drawdown(s)[0]
p = s.sum()/dd.max()
return p
if isinstance(pnl,Series):
return processVector(pnl)
elif isinstance(pnl,DataFrame):
p = Series(index = pnl.columns)
for col in pnl.columns:
p[col] = processVector(pnl[col])
return p
else:
raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl)))
def candlestick(df,width=0.5, colorup='b', colordown='r'):
''' plot a candlestick chart of a dataframe '''
O = df['open'].values
H = df['high'].values
L = df['low'].values
C = df['close'].values
fig = plt.gcf()
ax = plt.axes()
#ax.hold(True)
X = df.index
#plot high and low
ax.bar(X,height=H-L,bottom=L,width=0.1,color='k')
idxUp = C>O
ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup)
idxDown = C<=O
ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown)
try:
fig.autofmt_xdate()
except Exception: # pragma: no cover
pass
ax.grid(True)
#ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
''' convert datetime timestamp to matlab numeric timestamp '''
mdn = t + dt.timedelta(days = 366)
frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def getDataSources(fName = None):
''' return data sources directories for this machine.
directories are defined in datasources.ini or provided filepath'''
import socket
from ConfigParser import ConfigParser
pcName = socket.gethostname()
p = ConfigParser()
p.optionxform = str
if fName is None:
fName = 'datasources.ini'
p.read(fName)
if pcName not in p.sections():
raise NameError('Host name section %s not found in file %s' %(pcName,fName))
dataSources = {}
for option in p.options(pcName):
dataSources[option] = p.get(pcName,option)
return dataSources
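# Editorial sketch of the expected datasources.ini layout (hypothetical paths;
# the section name must match this machine's host name):
#     [myHostName]
#     quotes = D:/data/quotes
#     transactions = D:/data/transactions
# getDataSources() then returns {'quotes': 'D:/data/quotes', 'transactions': 'D:/data/transactions'}.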
if __name__ == '__main__':
df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]})
plt.clf()
candlestick(df)
|
bsd-3-clause
|
JosmanPS/scikit-learn
|
sklearn/metrics/scorer.py
|
211
|
13141
|
"""
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
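# Editorial usage sketch (not part of the original module): wrapping a loss
# function, where smaller is better, so the resulting scorer sign-flips the value:
#     from sklearn.metrics import mean_squared_error
#     neg_mse_scorer = make_scorer(mean_squared_error, greater_is_better=False)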
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
|
bsd-3-clause
|
wkentaro/fcn
|
examples/apc2016/datasets/v1.py
|
1
|
1072
|
from base import APC2016DatasetBase
from jsk import APC2016jskDataset
from rbo import APC2016rboDataset
class APC2016DatasetV1(APC2016DatasetBase):
def __init__(self, data_type):
assert data_type in ('train', 'val')
self.datasets = [
APC2016jskDataset(data_type),
APC2016rboDataset(data_type),
]
def __len__(self):
return sum(len(d) for d in self.datasets)
    def get_example(self, i):
        skipped = 0
        for dataset in self.datasets:
            current_index = i - skipped
            if current_index < len(dataset):
                return dataset.get_example(current_index)
            skipped += len(dataset)
        raise IndexError('example index %d is out of range' % i)
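# Editorial note: examples are addressed by a running index over the concatenated
# sub-datasets; assuming both sub-datasets are non-empty,
#     ds = APC2016DatasetV1('val')
#     ds.get_example(0)            # served by APC2016jskDataset
#     ds.get_example(len(ds) - 1)  # served by APC2016rboDataset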
if __name__ == '__main__':
import matplotlib.pyplot as plt
import six
dataset_train = APC2016DatasetV1('train')
dataset_val = APC2016DatasetV1('val')
print('train: %d, val: %d' % (len(dataset_train), len(dataset_val)))
for i in six.moves.range(len(dataset_val)):
viz = dataset_val.visualize_example(i)
plt.imshow(viz)
plt.show()
|
mit
|
IssamLaradji/scikit-learn
|
examples/cluster/plot_lena_compress.py
|
271
|
2229
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale,
512 x 512 image, is used here to illustrate how `k`-means can be
used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
|
bsd-3-clause
|
LiaoPan/scikit-learn
|
examples/cluster/plot_kmeans_silhouette_analysis.py
|
242
|
5885
|
"""
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
|
bsd-3-clause
|
harisbal/pandas
|
pandas/tests/io/parser/test_parsers.py
|
1
|
5463
|
# -*- coding: utf-8 -*-
import os
import pytest
from pandas._libs.tslib import Timestamp
from pandas.compat import StringIO
from pandas import DataFrame, read_csv, read_table
import pandas.core.common as com
import pandas.util.testing as tm
from .c_parser_only import CParserTests
from .comment import CommentTests
from .common import ParserTests
from .compression import CompressionTests
from .converters import ConverterTests
from .dialect import DialectTests
from .dtypes import DtypeTests
from .header import HeaderTests
from .index_col import IndexColTests
from .mangle_dupes import DupeColumnTests
from .multithread import MultithreadTests
from .na_values import NAvaluesTests
from .parse_dates import ParseDatesTests
from .python_parser_only import PythonParserTests
from .quoting import QuotingTests
from .skiprows import SkipRowsTests
from .usecols import UsecolsTests
class BaseParser(CommentTests, CompressionTests,
ConverterTests, DialectTests,
DtypeTests, DupeColumnTests,
HeaderTests, IndexColTests,
MultithreadTests, NAvaluesTests,
ParseDatesTests, ParserTests,
SkipRowsTests, UsecolsTests,
QuotingTests):
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def float_precision_choices(self):
raise com.AbstractMethodError(self)
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath('io', 'parser', 'data')
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
self.csv_shiftjs = os.path.join(self.dirpath, 'sauron.SHIFT_JIS.csv')
class TestCParserHighMemory(BaseParser, CParserTests):
engine = 'c'
low_memory = False
float_precision_choices = [None, 'high', 'round_trip']
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
kwds['low_memory'] = self.low_memory
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
kwds['low_memory'] = self.low_memory
with tm.assert_produces_warning(FutureWarning):
df = read_table(*args, **kwds)
return df
class TestCParserLowMemory(BaseParser, CParserTests):
engine = 'c'
low_memory = True
float_precision_choices = [None, 'high', 'round_trip']
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
kwds['low_memory'] = self.low_memory
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
kwds['low_memory'] = True
with tm.assert_produces_warning(FutureWarning):
df = read_table(*args, **kwds)
return df
class TestPythonParser(BaseParser, PythonParserTests):
engine = 'python'
float_precision_choices = [None]
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
with tm.assert_produces_warning(FutureWarning):
df = read_table(*args, **kwds)
return df
class TestUnsortedUsecols(object):
def test_override__set_noconvert_columns(self):
        # GH 17351 - usecols needs to be sorted in _set_noconvert_columns
# based on the test_usecols_with_parse_dates test from usecols.py
from pandas.io.parsers import CParserWrapper, TextFileReader
s = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == 'integer':
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
parser = MyTextFileReader()
parser.options = {'usecols': [0, 2, 3],
'parse_dates': parse_dates,
'delimiter': ','}
parser._engine = MyCParserWrapper(StringIO(s), **parser.options)
df = parser.read()
tm.assert_frame_equal(df, expected)
|
bsd-3-clause
|
Obus/scikit-learn
|
benchmarks/bench_plot_fastkmeans.py
|
294
|
4676
|
from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
|
bsd-3-clause
|
OwaJawa/kaggle-galaxies
|
try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_big256.py
|
7
|
17443
|
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_big256.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_big256.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=256, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first # chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
layers.dump_params(l6, e=e)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
    # make predictions for testset, don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for set, don't forget to cut off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
|
bsd-3-clause
|
cpcloud/ibis
|
ibis/pandas/execution/window.py
|
1
|
11387
|
"""Code for computing window functions with ibis and pandas."""
import operator
import re
from collections import OrderedDict
import pandas as pd
import toolz
from pandas.core.groupby import SeriesGroupBy
import ibis.common.exceptions as com
import ibis.expr.operations as ops
import ibis.expr.window as win
import ibis.pandas.aggcontext as agg_ctx
from ibis.pandas.core import (
date_types,
execute,
integer_types,
simple_types,
timedelta_types,
timestamp_types,
)
from ibis.pandas.dispatch import execute_node, pre_execute
from ibis.pandas.execution import util
def _post_process_empty(scalar, parent, order_by, group_by):
assert not order_by and not group_by
index = parent.index
result = pd.Series([scalar]).repeat(len(index))
result.index = index
return result
def _post_process_group_by(series, parent, order_by, group_by):
assert not order_by and group_by
return series
def _post_process_order_by(series, parent, order_by, group_by):
assert order_by and not group_by
indexed_parent = parent.set_index(order_by)
index = indexed_parent.index
names = index.names
if len(names) > 1:
series = series.reorder_levels(names)
series = series.iloc[index.argsort(kind='mergesort')]
return series
def _post_process_group_by_order_by(series, parent, order_by, group_by):
indexed_parent = parent.set_index(group_by + order_by, append=True)
index = indexed_parent.index
# get the names of the levels that will be in the result
series_index_names = frozenset(series.index.names)
# get the levels common to series.index, in the order that they occur in
# the parent's index
reordered_levels = [
name for name in index.names if name in series_index_names
]
if len(reordered_levels) > 1:
series = series.reorder_levels(reordered_levels)
return series
@execute_node.register(ops.WindowOp, pd.Series, win.Window)
def execute_window_op(
op, data, window, scope=None, aggcontext=None, clients=None, **kwargs
):
operand = op.expr
    # pre execute "manually" here because otherwise we wouldn't pick up
# relevant scope changes from the child operand since we're managing
# execution of that by hand
operand_op = operand.op()
pre_executed_scope = pre_execute(
operand_op, *clients, scope=scope, aggcontext=aggcontext, **kwargs
)
scope = toolz.merge(scope, pre_executed_scope)
root, = op.root_tables()
root_expr = root.to_expr()
data = execute(
root_expr,
scope=scope,
clients=clients,
aggcontext=aggcontext,
**kwargs,
)
following = window.following
order_by = window._order_by
if (
order_by
and following != 0
and not isinstance(operand_op, ops.ShiftBase)
):
raise com.OperationNotDefinedError(
'Window functions affected by following with order_by are not '
'implemented'
)
group_by = window._group_by
grouping_keys = [
key_op.name
if isinstance(key_op, ops.TableColumn)
else execute(
key, scope=scope, clients=clients, aggcontext=aggcontext, **kwargs
)
for key, key_op in zip(
group_by, map(operator.methodcaller('op'), group_by)
)
]
order_by = window._order_by
if not order_by:
ordering_keys = ()
if group_by:
if order_by:
(
sorted_df,
grouping_keys,
ordering_keys,
) = util.compute_sorted_frame(
data, order_by, group_by=group_by, **kwargs
)
source = sorted_df.groupby(grouping_keys, sort=True)
post_process = _post_process_group_by_order_by
else:
source = data.groupby(grouping_keys, sort=False)
post_process = _post_process_group_by
else:
if order_by:
source, grouping_keys, ordering_keys = util.compute_sorted_frame(
data, order_by, **kwargs
)
post_process = _post_process_order_by
else:
source = data
post_process = _post_process_empty
new_scope = toolz.merge(
scope,
OrderedDict((t, source) for t in operand.op().root_tables()),
factory=OrderedDict,
)
# figure out what the dtype of the operand is
operand_type = operand.type()
operand_dtype = operand_type.to_pandas()
# no order by or group by: default summarization aggcontext
#
# if we're reducing and we have an order by expression then we need to
# expand or roll.
#
# otherwise we're transforming
if not grouping_keys and not ordering_keys:
aggcontext = agg_ctx.Summarize()
elif (
isinstance(
operand.op(), (ops.Reduction, ops.CumulativeOp, ops.Any, ops.All)
)
and ordering_keys
):
# XXX(phillipc): What a horror show
preceding = window.preceding
if preceding is not None:
max_lookback = window.max_lookback
assert not isinstance(operand.op(), ops.CumulativeOp)
aggcontext = agg_ctx.Moving(
preceding,
max_lookback,
parent=source,
group_by=grouping_keys,
order_by=ordering_keys,
dtype=operand_dtype,
)
else:
# expanding window
aggcontext = agg_ctx.Cumulative(
parent=source,
group_by=grouping_keys,
order_by=ordering_keys,
dtype=operand_dtype,
)
else:
# groupby transform (window with a partition by clause in SQL parlance)
aggcontext = agg_ctx.Transform(
parent=source,
group_by=grouping_keys,
order_by=ordering_keys,
dtype=operand_dtype,
)
result = execute(
operand,
scope=new_scope,
aggcontext=aggcontext,
clients=clients,
**kwargs,
)
series = post_process(result, data, ordering_keys, grouping_keys)
assert len(data) == len(
series
), 'input data source and computed column do not have the same length'
return series
@execute_node.register(
(ops.CumulativeSum, ops.CumulativeMax, ops.CumulativeMin),
(pd.Series, SeriesGroupBy),
)
def execute_series_cumulative_sum_min_max(op, data, **kwargs):
typename = type(op).__name__
method_name = (
re.match(r"^Cumulative([A-Za-z_][A-Za-z0-9_]*)$", typename)
.group(1)
.lower()
)
method = getattr(data, "cum{}".format(method_name))
return method()
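# Editorial note: the regex above maps an op class name to the matching pandas
# accessor, e.g. CumulativeSum -> cumsum, CumulativeMax -> cummax,
# CumulativeMin -> cummin.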
@execute_node.register(ops.CumulativeMean, (pd.Series, SeriesGroupBy))
def execute_series_cumulative_mean(op, data, **kwargs):
# TODO: Doesn't handle the case where we've grouped/sorted by. Handling
# this here would probably require a refactor.
return data.expanding().mean()
@execute_node.register(ops.CumulativeOp, (pd.Series, SeriesGroupBy))
def execute_series_cumulative_op(op, data, aggcontext=None, **kwargs):
assert aggcontext is not None, "aggcontext is none in {} operation".format(
type(op)
)
typename = type(op).__name__
match = re.match(r'^Cumulative([A-Za-z_][A-Za-z0-9_]*)$', typename)
if match is None:
raise ValueError('Unknown operation {}'.format(typename))
try:
operation_name, = match.groups()
except ValueError:
raise ValueError(
'More than one operation name found in {} class'.format(typename)
)
dtype = op.to_expr().type().to_pandas()
    assert isinstance(aggcontext, agg_ctx.Cumulative), 'Got {}'.format(type(aggcontext))
result = aggcontext.agg(data, operation_name.lower())
# all expanding window operations are required to be int64 or float64, so
# we need to cast back to preserve the type of the operation
try:
return result.astype(dtype)
except TypeError:
return result
def post_lead_lag(result, default):
if not pd.isnull(default):
return result.fillna(default)
return result
@execute_node.register(
(ops.Lead, ops.Lag),
(pd.Series, SeriesGroupBy),
integer_types + (type(None),),
simple_types + (type(None),),
)
def execute_series_lead_lag(op, data, offset, default, **kwargs):
func = toolz.identity if isinstance(op, ops.Lag) else operator.neg
result = data.shift(func(1 if offset is None else offset))
return post_lead_lag(result, default)
@execute_node.register(
(ops.Lead, ops.Lag),
(pd.Series, SeriesGroupBy),
timedelta_types,
date_types + timestamp_types + (str, type(None)),
)
def execute_series_lead_lag_timedelta(
op, data, offset, default, aggcontext=None, **kwargs
):
"""An implementation of shifting a column relative to another one that is
in units of time rather than rows.
"""
# lagging adds time (delayed), leading subtracts time (moved up)
func = operator.add if isinstance(op, ops.Lag) else operator.sub
group_by = aggcontext.group_by
order_by = aggcontext.order_by
# get the parent object from which `data` originated
parent = aggcontext.parent
# get the DataFrame from the parent object, handling the DataFrameGroupBy
# case
parent_df = getattr(parent, 'obj', parent)
# index our parent df by grouping and ordering keys
indexed_original_df = parent_df.set_index(group_by + order_by)
# perform the time shift
adjusted_parent_df = parent_df.assign(
**{k: func(parent_df[k], offset) for k in order_by}
)
# index the parent *after* adjustment
adjusted_indexed_parent = adjusted_parent_df.set_index(group_by + order_by)
# get the column we care about
result = adjusted_indexed_parent[getattr(data, 'obj', data).name]
# reindex the shifted data by the original frame's index
result = result.reindex(indexed_original_df.index)
# add a default if necessary
return post_lead_lag(result, default)
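# Editorial sketch (not part of ibis): the essence of the time-based shift above
# in plain pandas; column names and data are hypothetical.
#     df = pd.DataFrame({'t': pd.date_range('2020-01-01', periods=3, freq='D'),
#                        'v': [1.0, 2.0, 3.0]})
#     shifted = df.assign(t=df['t'] + pd.Timedelta('1D')).set_index('t')['v']
#     lagged = shifted.reindex(df.set_index('t').index)
#     # lagged -> [NaN, 1.0, 2.0]: each value delayed by one day rather than one row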
@execute_node.register(ops.FirstValue, pd.Series)
def execute_series_first_value(op, data, **kwargs):
return data.values[0]
@execute_node.register(ops.FirstValue, SeriesGroupBy)
def execute_series_group_by_first_value(op, data, aggcontext=None, **kwargs):
return aggcontext.agg(data, 'first')
@execute_node.register(ops.LastValue, pd.Series)
def execute_series_last_value(op, data, **kwargs):
return data.values[-1]
@execute_node.register(ops.LastValue, SeriesGroupBy)
def execute_series_group_by_last_value(op, data, aggcontext=None, **kwargs):
return aggcontext.agg(data, 'last')
@execute_node.register(ops.MinRank, (pd.Series, SeriesGroupBy))
def execute_series_min_rank(op, data, **kwargs):
# TODO(phillipc): Handle ORDER BY
return data.rank(method='min', ascending=True).astype('int64') - 1
@execute_node.register(ops.DenseRank, (pd.Series, SeriesGroupBy))
def execute_series_dense_rank(op, data, **kwargs):
# TODO(phillipc): Handle ORDER BY
return data.rank(method='dense', ascending=True).astype('int64') - 1
@execute_node.register(ops.PercentRank, (pd.Series, SeriesGroupBy))
def execute_series_percent_rank(op, data, **kwargs):
# TODO(phillipc): Handle ORDER BY
return data.rank(method='min', ascending=True, pct=True)
|
apache-2.0
|
luukhoavn/deepnet
|
deepnet/ais.py
|
10
|
7589
|
"""Computes partition function for RBM-like models using Annealed Importance Sampling."""
import numpy as np
from deepnet import dbm
from deepnet import util
from deepnet import trainer as tr
from choose_matrix_library import *
import sys
import pdb
import time
import itertools
import matplotlib.pyplot as plt
from deepnet import visualize
import lightspeed
def SampleEnergySoftmax(layer, numsamples, use_lightspeed=False):
sample = layer.sample
energy = layer.state
temp = layer.expanded_batch
if use_lightspeed:
layer.ApplyActivation()
layer.state.sum(axis=0, target=layer.temp)
layer.state.div_by_row(layer.temp, target=temp)
probs_cpu = temp.asarray().astype(np.float64)
samples_cpu = lightspeed.SampleSoftmax(probs_cpu, numsamples)
sample.overwrite(samples_cpu.astype(np.float32))
else:
sample.assign(0)
for i in range(numsamples):
energy.perturb_energy_for_softmax_sampling(target=temp)
temp.choose_max_and_accumulate(sample)
def LogMeanExp(x):
offset = x.max()
return offset + np.log(np.exp(x-offset).mean())
def LogSumExp(x):
offset = x.max()
return offset + np.log(np.exp(x-offset).sum())
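# Example of why the max is factored out above: for x = np.array([1000., 1000.]),
# np.log(np.exp(x).sum()) overflows to inf, while LogSumExp(x) returns
# 1000 + log(2) via the identity log(sum_i exp(x_i)) = x_max + log(sum_i exp(x_i - x_max)).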
def Display(w, hid_state, input_state, w_var, x_axis):
w = w.asarray().flatten()
#plt.figure(1)
#plt.clf()
#plt.hist(w, 100)
#visualize.display_hidden(hid_state.asarray(), 2, 'activations', prob=True)
#plt.figure(3)
#plt.clf()
#plt.imshow(hid_state.asarray().T, cmap=plt.cm.gray, interpolation='nearest')
#plt.figure(4)
#plt.clf()
#plt.imshow(input_state.asarray().T, cmap=plt.cm.gray, interpolation='nearest')
#, state.shape[0], state.shape[1], state.shape[0], 3, title='Markov chains')
#plt.tight_layout(pad=0, w_pad=0, h_pad=0)
plt.figure(5)
plt.clf()
plt.suptitle('Variance')
plt.plot(np.array(x_axis), np.array(w_var))
plt.draw()
def AISReplicatedSoftmax(model, D, num_chains, display=False):
schedule = np.concatenate((
#np.arange(0.0, 1.0, 0.01),
#np.arange(0.0, 1.0, 0.001),
np.arange(0.0, 0.7, 0.001), # 700
np.arange(0.7, 0.9, 0.0001), # 2000
np.arange(0.9, 1.0, 0.00002) # 5000
))
#schedule = np.array([0.])
cm.CUDAMatrix.init_random(seed=0)
assert len(model.layer) == 2, 'Only implemented for RBMs.'
steps = len(schedule)
input_layer = model.layer[0]
hidden_layer = model.layer[1]
edge = model.edge[0]
batchsize = num_chains
w = edge.params['weight']
a = hidden_layer.params['bias']
b = input_layer.params['bias']
numvis, numhid = w.shape
f = 0.1
input_layer.AllocateBatchsizeDependentMemory(num_chains)
hidden_layer.AllocateBatchsizeDependentMemory(num_chains)
# INITIALIZE TO SAMPLES FROM BASE MODEL.
input_layer.state.assign(0)
input_layer.NN.assign(D)
input_layer.state.add_col_mult(b, f)
SampleEnergySoftmax(input_layer, D)
w_ais = cm.CUDAMatrix(np.zeros((1, batchsize)))
#pdb.set_trace()
w_variance = []
x_axis = []
if display:
Display(w_ais, hidden_layer.state, input_layer.state, w_variance, x_axis)
#raw_input('Press Enter.')
#pdb.set_trace()
# RUN AIS.
for i in range(steps-1):
sys.stdout.write('\r%d' % (i+1))
sys.stdout.flush()
cm.dot(w.T, input_layer.sample, target=hidden_layer.state)
hidden_layer.state.add_col_mult(a, D)
hidden_layer.state.mult(schedule[i], target=hidden_layer.temp)
hidden_layer.state.mult(schedule[i+1])
cm.log_1_plus_exp(hidden_layer.state, target=hidden_layer.deriv)
cm.log_1_plus_exp(hidden_layer.temp)
hidden_layer.deriv.subtract(hidden_layer.temp)
w_ais.add_sums(hidden_layer.deriv, axis=0)
w_ais.add_dot(b.T, input_layer.sample, mult=(1-f)*(schedule[i+1]-schedule[i]))
hidden_layer.ApplyActivation()
hidden_layer.Sample()
cm.dot(w, hidden_layer.sample, target=input_layer.state)
input_layer.state.add_col_vec(b)
input_layer.state.mult(schedule[i+1])
input_layer.state.add_col_mult(b, f*(1-schedule[i+1]))
SampleEnergySoftmax(input_layer, D)
if display and (i % 100 == 0 or i == steps - 2):
w_variance.append(w_ais.asarray().var())
x_axis.append(i)
Display(w_ais, hidden_layer.state, input_layer.sample, w_variance, x_axis)
sys.stdout.write('\n')
z = LogMeanExp(w_ais.asarray()) + D * LogSumExp(f * b.asarray()) + numhid * np.log(2)
return z
def AISBinaryRbm(model, schedule):
cm.CUDAMatrix.init_random(seed=int(time.time()))
assert len(model.layer) == 2, 'Only implemented for RBMs.'
steps = len(schedule)
input_layer = model.layer[0]
hidden_layer = model.layer[1]
edge = model.edge[0]
batchsize = model.t_op.batchsize
w = edge.params['weight']
a = hidden_layer.params['bias']
b = input_layer.params['bias']
numvis, numhid = w.shape
# INITIALIZE TO UNIFORM RANDOM.
input_layer.state.assign(0)
input_layer.ApplyActivation()
input_layer.Sample()
w_ais = cm.CUDAMatrix(np.zeros((1, batchsize)))
unitcell = cm.empty((1, 1))
# RUN AIS.
for i in range(1, steps):
cm.dot(w.T, input_layer.sample, target=hidden_layer.state)
hidden_layer.state.add_col_vec(a)
hidden_layer.state.mult(schedule[i-1], target=hidden_layer.temp)
hidden_layer.state.mult(schedule[i])
cm.log_1_plus_exp(hidden_layer.state, target=hidden_layer.deriv)
cm.log_1_plus_exp(hidden_layer.temp)
hidden_layer.deriv.subtract(hidden_layer.temp)
w_ais.add_sums(hidden_layer.deriv, axis=0)
w_ais.add_dot(b.T, input_layer.state, mult=schedule[i]-schedule[i-1])
hidden_layer.ApplyActivation()
hidden_layer.Sample()
cm.dot(w, hidden_layer.sample, target=input_layer.state)
input_layer.state.add_col_vec(b)
input_layer.state.mult(schedule[i])
input_layer.ApplyActivation()
input_layer.Sample()
z = LogMeanExp(w_ais.asarray()) + numvis * np.log(2) + numhid * np.log(2)
return z
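# In the loop above, each chain accumulates the AIS log importance weight
#   log w += f_k(v) - f_{k-1}(v),  with  f_k(v) = beta_k * b'v + sum_j log(1 + exp(beta_k * (w'v + a)_j)),
# where beta_k = schedule[k] ramps from 0 (uniform base model) to 1 (target RBM);
# the final log Z estimate adds the base model's log partition function,
# numvis * log(2) + numhid * log(2).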
def GetAll(n):
x = np.zeros((n, 2**n))
a = []
for i in range(n):
a.append([0, 1])
for i, r in enumerate(itertools.product(*tuple(a))):
x[:, i] = np.array(r)
return x
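# Example: GetAll(2) enumerates every binary column vector of length 2:
#   array([[0., 0., 1., 1.],
#          [0., 1., 0., 1.]])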
def ExactZ_binary_binary(model):
assert len(model.layer) == 2, 'Only implemented for RBMs.'
input_layer = model.layer[0]
hidden_layer = model.layer[1]
edge = model.edge[0]
w = edge.params['weight']
a = hidden_layer.params['bias']
b = input_layer.params['bias']
numvis, numhid = w.shape
batchsize = 2**numvis
input_layer.AllocateBatchsizeDependentMemory(batchsize)
hidden_layer.AllocateBatchsizeDependentMemory(batchsize)
all_inputs = GetAll(numvis)
w_ais = cm.CUDAMatrix(np.zeros((1, batchsize)))
input_layer.sample.overwrite(all_inputs)
cm.dot(w.T, input_layer.sample, target=hidden_layer.state)
hidden_layer.state.add_col_vec(a)
cm.log_1_plus_exp(hidden_layer.state)
w_ais.add_sums(hidden_layer.state, axis=0)
  w_ais.add_dot(b.T, input_layer.sample)  # visible bias term b'v for every enumerated configuration
offset = float(w_ais.asarray().max())
w_ais.subtract(offset)
cm.exp(w_ais)
z = offset + np.log(w_ais.asarray().sum())
return z
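# The brute-force sum above evaluates, with the hidden units marginalized out,
#   log Z = log sum_v exp( b'v + sum_j log(1 + exp((w'v + a)_j)) ),
# which is feasible only for small numvis (2**numvis visible configurations).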
def Usage():
  print '%s <model file> <number of Markov chains to run> [number of words (for Replicated Softmax models)]' % sys.argv[0]
if __name__ == '__main__':
board = tr.LockGPU()
model_file = sys.argv[1]
numchains = int(sys.argv[2])
  if len(sys.argv) > 3:
    D = int(sys.argv[3])  # number of words (for Replicated Softmax models).
  else:
    D = 10  # default number of words.
m = dbm.DBM(model_file)
m.LoadModelOnGPU(batchsize=numchains)
plt.ion()
log_z = AISReplicatedSoftmax(m, D, numchains, display=True)
print 'Log Z %.5f' % log_z
#log_z = AIS(m, schedule)
#print 'Log Z %.5f' % log_z
#log_z = ExactZ_binary_binary(m)
#print 'Exact %.5f' % log_z
tr.FreeGPU(board)
raw_input('Press Enter.')
|
bsd-3-clause
|
lo-co/atm-py
|
build/lib/atmPy/for_removal/POPS/mie.py
|
6
|
51426
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This temporary script file is located here:
/Users/htelg/.spyder2/.temp.py
"""
#ToDo
#- I don't think we have to use "natural" (unpolarized) light ... our laser is polarized ...
#- index of refraction at 405 nm for PSL
#- plot the results
# Check using http://omlc.ogi.edu/calc/mie_calc.html
import os
import sys
import types
import matplotlib.cm as mplcm
import matplotlib.colors as colors
import numpy as np
import pylab as plt
from scipy.interpolate import interp1d
from atmPy.for_removal.POPS import tools
from atmPy.for_removal.mie import bhmie
###########################
def makeMie_diameter(radiusRangeInMikroMeter = [0.05,1.5],
noOfdiameters = 200,
                     noOfAngles = 100, # number of scattering angles
POPSdesign = 'POPS 2',
IOR = 1.45,
WavelengthInUm = .405,
geometry = "perpendicular",
#the following was added 20140904
mirrorJetDist = 10.,
scale = 'log',
#below: added 20141030
broadened = False
):
"""
Performs mie calculations as a function of particle radius
Arguments
---------
geometry: "perpendicular",
broadened: {"style": 'gauss',
'center': 0.405,
'fwhm': 0.005,
'noOfwl': 10}
{'style': 'custom',
'spectrum': (wl,intens),
'interpolate': 100}
Returns
-------
diameter (yes diameter, not like the input, which is in radius ... for historic reasons!)
array and an array of the intensity of the light scattered onto the detector (this is also
not fully correct, since we do not do a decent integration but a sum, at some point it would be nice to change that)
parameters:
"""
if scale == 'log':
dRange = np.logspace(np.log10(radiusRangeInMikroMeter[0]),np.log10(radiusRangeInMikroMeter[1]),noOfdiameters) #radius range
elif scale == 'linear':
dRange = np.linspace(radiusRangeInMikroMeter[0],radiusRangeInMikroMeter[1],noOfdiameters) #radius range
elif scale == 'log_20':
dRange = np.logspace(np.log10(radiusRangeInMikroMeter[0]),np.log10(radiusRangeInMikroMeter[1]),noOfdiameters,base = 100) #radius range
event = Mie(silent = True, design = POPSdesign, indexOfRef = IOR, diameter = 'dynamic') # Ref. for indexOfRef. Patterson 2004
event.set_nang(noOfAngles)
event.POPSdimensions['mirror(top)-jet distance (mm)'] = float(mirrorJetDist)
singleLine = False
if isinstance(WavelengthInUm,float):
exWavelengthInUm=np.array([WavelengthInUm])
elif isinstance(WavelengthInUm,types.ListType):
exWavelengthInUm=np.array(WavelengthInUm)
else:
exWavelengthInUm = WavelengthInUm
if broadened:
if broadened['style'] == 'gauss':
wc = broadened['center']
fwhm = broadened['fwhm']
noOfwl = broadened['noOfwl']
            fwxm = 2*np.sqrt(2*np.log(10)) * fwhm / (2*np.sqrt(2*np.log(2)))  # convert FWHM to full width at tenth maximum
exWavelengthInUm = np.linspace(wc - fwxm, wc+ fwxm, noOfwl)
normalizer = tools.gauss_function(exWavelengthInUm, wc, fwhm)
if broadened['style'] == 'custom':
exWavelengthInUm = broadened['spectrum'][0]
normalizer = broadened['spectrum'][1]
if broadened['interpolate']:
spectrum = interp1d(exWavelengthInUm,normalizer)
noOfwl = broadened['interpolate']
wl_new = np.linspace(exWavelengthInUm.min(),exWavelengthInUm.max(),noOfwl)
exWavelengthInUm = wl_new
normalizer = spectrum(wl_new)
else:
normalizer = np.ones(exWavelengthInUm.shape)/float(len(exWavelengthInUm))
noOfwl = len(exWavelengthInUm)
if len(exWavelengthInUm) == 1:
singleLine = True
output = np.zeros((exWavelengthInUm.shape[0]+1,dRange.shape[0]))
for e,i in enumerate(exWavelengthInUm):
event.set_wavelength(i)
perpInt = []
for i in dRange:
event.set_r(i)
intensity = event.get_detectableIntensity(geometry)
perpInt.append(intensity)
diameter = np.array(2 * np.array(dRange))
scatteringEfficiency = np.array(perpInt)
# if broadened:
output[0]+= normalizer[e] * scatteringEfficiency/normalizer.sum()
if len(exWavelengthInUm) == 1:
return diameter, scatteringEfficiency
elif not singleLine: #why am I doing that?
output[e+1]=scatteringEfficiency
elif e == int(len(exWavelengthInUm)/2): #why am I doing that?
output[1] = scatteringEfficiency
if broadened:
return diameter,output#,(exWavelengthInUm,normalizer)
else:
return diameter, output
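# Minimal sketch of the 'broadened' weighting used above (function and parameter
# names here are hypothetical): each wavelength's scattering curve is weighted by
# the line-shape value at that wavelength, summed, and normalized by the total
# weight, exactly as output[0] accumulates normalizer[e] * scatteringEfficiency / normalizer.sum().
def _broadened_average_sketch(curves, wavelengths, center, fwhm):
    curves = np.asarray(curves)  # shape (n_wavelengths, n_diameters)
    weights = tools.gauss_function(np.asarray(wavelengths), center, fwhm)
    return (weights[:, np.newaxis] * curves).sum(axis=0) / weights.sum()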
###########################################################
class Mie():
""" Creates a Mie object
An introduction to mie scattering: http://omlc.org/education/ece532/class3/mie_math.html
What to do next:
1. Define the following parameters:
    \t silent: \t True or False - if False, a lot of output will be generated,
    \t useful for debugging or understanding what was actually done
\t diameter: \t number or array type - unit is micro meter - if array,
\t scattering efficiency of all diameters in array will be calculated.
\t Note, diameter and wavelength can not be both array type
\t wavelength:\t number or array type - unit is micro meter - if array,
\t scattering efficiency of all wavelength in array will be calculated.
\t Note, diameter and wavelength can not be both array type.
\t indexOfRef:\t number or array type - if array, scattering efficiency for all IORs in array will be calculated.
    \t ToDo: sort out the diameter/wavelength array issues
\t design: \t string - this sets all parameters related to the geometry of the particular version of POPS.
\t see docstring of set_dimensions for details
\t nang: \t integer - angle steps in which calculations are performed.
\t material: \t Not fully functional. TODO: create database
"""
def __init__(self, silent = True, diameter = False, wavelength = False, material = False, indexOfRef = False, design = 'POPS 2', nang = 100):
# if indexOfRef and material:
# raise TypeError('index of refraction is calculated on the defined material. Therefore, either indexOfRef or material has to be set to False')
self.material = material
self.set_wavelength(wavelength)
self.set_d(diameter)
self.set_n(n = indexOfRef)
self.set_nang(nang)
self.silent = silent
self.POPSdimensions = {}
self.design = design
if design:
self.set_dimensions(design)
self.upToDate = False
self.upToDate_geometry = False
self.dontAskAgain = False
def set_dimensions(self, design):
if design == 'POPS 1': # This is the prototype, printed with the old 3D Printer
mDiameter = self.POPSdimensions['mirror diameter (mm)'] = 25.
rMirror = 20. # radius of the underlying sphere of the spherical mirror
self.POPSdimensions['mirror(top)-jet distance (mm)'] = rMirror - tools.segment_hight(rMirror, mDiameter)
self.POPSdimensions['angle: jet-mirrorNormal (rad)'] = np.pi/2. # This is the mean scattering angle (90 deg)
elif design == 'POPS 2': # This is the prototype, printed with the old 3D Printer
mDiameter = self.POPSdimensions['mirror diameter (mm)'] = 25.
rMirror = 20. # radius of the underlying sphere of the spherical mirror
self.POPSdimensions['mirror(top)-jet distance (mm)'] = 7.68
self.POPSdimensions['angle: jet-mirrorNormal (rad)'] = np.pi/2. # This is the mean scattering angle (90 deg)
self.POPSdimensions['polarization (laser)'] = 'perpendicular'
if not self.silent:
print("You are using %s" % design)
print("self.POPSdimensions['mirror(top)-jet distance (mm)']",
self.POPSdimensions['mirror(top)-jet distance (mm)'])
print("self.POPSdimensions['mirror diameter (mm)']", self.POPSdimensions['mirror diameter (mm)'])
print("self.POPSdimensions['angle: jet-mirrorNormal (rad)']",
self.POPSdimensions['angle: jet-mirrorNormal (rad)'])
print(
"(not used yet) self.POPSdimensions['polarization (laser)']", self.POPSdimensions['polarization (laser)'] )
print('Distance from top to bottom of mirror', tools.segment_hight(rMirror, mDiameter))
if self.POPSdimensions['angle: jet-mirrorNormal (rad)'] != np.pi/2:
raw_input("angle is not 90 deg, you better check everythin, it might not work right")
self.upToDate_geometry = False
def set_r(self, r):
if not r:
print(r)
self.r = 0.5 * float(raw_input('please define particle diameter in um: '))
        elif np.all(r > 10) and not self.dontAskAgain:
            antwort = raw_input("""This is probably an error, are you sure you are giving the particle radius in um?!? (y)""")
            if antwort.lower() == 'n':
                sys.exit('as you wish ... exit')
            else:
                self.dontAskAgain = True
                self.r = r
else:
self.r = r
self.upToDate = False
def set_d(self, d):
if type(d) == str:
self.r = d
else:
self.set_r(d/2.)
def set_wavelength(self, wavelength):
if wavelength > 10.:
antwort = raw_input("""This is probably an error, are you sure you are giving the wavelength in um?!?""")
if antwort.lower() == 'n':
sys.exit('as you wish ... exit!')
self.wavelength = wavelength
self.upToDate = False
def set_n(self, n = False):
if n and self.material:
raise TypeError('Scattering event is set to "material", n can therefore not be changed')
# elif n and self.n:
# raise TypeError('Refractive index was defined at the begining, n can therefore not be changed')
elif self.material:
materialList = []
materialList.append({'name' : 'polystyrene', 'get' : tools.refIndex_polystyrene})
found = False
for i in materialList:
if i['name'] == self.material:
self.n = i['get'](self.wavelength)
if found:
raise ValueError("Found two matching materials! Not possible!")
else:
found = True
if not found:
raise KeyError('%s is not in the list of available materials' % self.material)
elif not n:
self.n = complex(raw_input('please define refractive index: '))
elif n:
self.n = n
self.upToDate = False
def set_nang(self,nang):
"""set the number of angles between 0 and 90 degries"""
self.nang = nang
self.upToDate = False
def set_SizeParameter(self): #r, wavelength):
self.x = 2*np.pi/self.wavelength * self.r
return self.x
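    # Example: a 0.5 um radius particle probed at 0.405 um gives
    # x = 2 * pi * 0.5 / 0.405, i.e. a dimensionless size parameter of about 7.76.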
def check_param_Defined(self):
if not self.r:
self.set_r(float(raw_input('please define particle radius in um: ')))
# antwort = F
if not self.wavelength:
            self.set_wavelength(float(raw_input('please define wavelength in um: ')))
# antwort = False
if not self.n:
if self.material:
pass
else:
                self.set_n(complex(raw_input('please define refractive index: ')))
# antwort = False
if not self.nang:
self.set_nang(raw_input('please set number of angles between 0 and 90 degrees: '))
# def update(self):
# self.check_param_Defined()
# if not self.upToDate:
# self.set_SizeParameter()
# self.do_bhmie()
# self.upToDate = True
#
# #self.set_xAxes()
# if not self.silent:
# print 'updated'
# else:
# if not self.silent:
# print 'no update needed'
def update_hagen(self):
self.check_param_Defined()
if not self.upToDate:
if self.material:
self.set_n()
self.set_SizeParameter()
self.do_bhmie_hagen()
# self.calc_Natural()
# self.calc_Perpendicular()
# self.calc_Parallel()
self.set_xAxis()
self.upToDate = True
#self.set_xAxes()
if not self.silent:
print('updated')
else:
if not self.silent:
print('no update needed')
def update_geometry(self):
if not self.upToDate_geometry:
self.get_mirror_grid()
self.upToDate = True
def set_xAxis(self):
noOfPts = 2 * ((self.nang * 2) - 1)
self.xAxis = np.linspace(0,2 * np.pi,noOfPts)
def print_current_parameter(self):
"""prints all relevant parameters"""
print('particle diameter: \t\t', 2 * self.r)
print('particle size parameter: \t', self.x)
print('wavelength: \t\t\t', self.wavelength)
print('refractive index: \t\t', self.n)
print('design: \t\t\t', self.design)
# def do_bhmie(self):
# self.s1,self.s2,self.qext,self.qsca,self.qback,self.gsca = bhmie.bhmie(self.x, self.n, self.nang)
def do_bhmie_hagen(self):
if not self.silent:
self.print_current_parameter()
bhh = bhmie.bhmie_hagen(self.x, self.n, self.nang)
s1,s2,self.qext,self.qsca,self.qback,self.gsca = bhh.return_Values()
# data = (abs(self.s1))**2#/(np.pi * self.x**2 * self.qsca)
s1_Reverse = s1[::-1]
self.s1 = np.concatenate((s1,s1_Reverse))
s2_Reverse = s2[::-1]
self.s2 = np.concatenate((s2,s2_Reverse))
def print_Data(self):
for i in self.xAxis:
print(i, ' , ', self.YNatural[i])
def get_mirror_grid(self):
np.set_printoptions(threshold=np.nan)
np.set_printoptions(precision=2)
dm = self.POPSdimensions['mirror diameter (mm)']
h = self.POPSdimensions['mirror(top)-jet distance (mm)']
# print 'h', h
# print 'dm', dm
rSphere = tools.sphereRadius_fromGeometry(h, dm) # 1.
alphMax = tools.alphamax_fromGeometry(h, dm)
sSphere = tools.arc_length(rSphere, h)
# sSphereA = tools.arc_length_alpha(rSphere, alphMax * 2.) #for test purposes
# angleRangeArray, dataRangeArray = tools.find_angleRange(self.POPSdimensions['angle: jet-mirrorNormal (rad)'],alphMax,self.xAxis, data)
angleRangeArray, angleIndexArray = tools.find_angleRange(self.POPSdimensions['angle: jet-mirrorNormal (rad)'], alphMax, self.xAxis)
stepWidth = sSphere/len(angleRangeArray)
self.angleIndexArray = angleIndexArray
nn = len(angleIndexArray)
indexMatrix = np.ones((nn,nn), dtype = int) * angleIndexArray
angleMatrix = np.ones((nn,nn), dtype = int) * angleRangeArray
#
# offAngleMatrix = np.empty((nn,nn))
# offAngleMatrix[:] = 0 #np.NAN
#
yArcLenghtMatrix = np.empty((nn,nn))
yArcLenghtMatrix[:] = 0
# print 'nn', nn
# print 'stepWidth', stepWidth
ArcLengthArray = abs(np.array(list(range(int(-nn / 2), 0, 1)) + list(range(0, int(nn / 2), 1))))
ArcLengthMatrix = np.ones((nn,nn))
ArcLengthMatrix = (ArcLengthMatrix * ArcLengthArray).transpose() * stepWidth
# print "ArcLengthMatrix"
# raw_input(ArcLengthMatrix.astype(int))
rs = tools.sphereSegment_radius(rSphere, angleMatrix - np.pi / 2.)
# print h
ss = tools.arc_length(rs, h)
# print "ss"
# raw_input(ss.astype(int))
ArcLengthMatrix[ArcLengthMatrix > ss/2.] = np.NAN
# print "ArcLengthMatrix"
# raw_input(ArcLengthMatrix.astype(int))
self.offAngleMatrix = .5 * tools.segment_angle(rs, ArcLengthMatrix)
def get_detectableIntensity(self, polarization = "perpendicular"):
""" In this function I want to calculate a solid angle which is defined by the mirror and then all the light which is scattered into that angle.
Parameters:
\t geometry: polarization of the laser with respect to the plane defined by laser beam and the collection direction. values: perpendiular, paralllel or natural
The following calculations might seam weard. However, since there is no way of calculating the scattering efficiency analytically as a function of the angle I create a...
1. claculate the distance from the particle to the edge of the mirror:
rSphere = tools.sphereRadius_fromGeometry(h,dm)
h: shortest distance from particle to plane defined by the edge of the mirror
dm: mirror diameter
2. calculate the maximum colectable scattering angle alphMax. alphMax is the angle between vector(particle-center of morror) and vector(particle-edge of morror)
alphMax = tools.alphamax_fromGeometry(h,dm)
3. calculate the arc length which is cut out of the imaginary circle with radius rSphere by the mirror
sSphere = tools.arc_length(rSphere, h)
4. get the section of the mie_scattering_data based on the angle between incident light and mirror normal (probably 90 deg) and the arc length
angleRangeArray, dataRangeArray = tools.find_angleRange(self.POPSdimensions['angle: jet-mirrorNormal (rad)'],alphMax,self.xAxis, data)
5. since we are are looking at the scattering in 3D we also need to go to the side. We will do that in a way that the angle stays the same.
What will change is the orientation of the scattering plane and ,therefore ,the polariazation of the light with respect to that plane.
Therefor we will go along arcs perpendicular to the arc we calculated before (sSphere) using the same stepwith which is defined by the
number of data points along the sSphere:
stepWidth = sSphere/len(angleRangeArray)
"""
#if isinstance(self, collections.Iterable) and not isinstance(a, types.StringTypes):
# print 'bla'
self.update_hagen()
self.update_geometry()
# mirror_grid = self.get_mirror_grid()
# raw_input('watewatte')
whatList = ('natural', 'parallel', 'perpendicular')
if polarization not in whatList:
raise ValueError('Geometry has to be one of the following: "%s", "%s", or "%s"? %s is not an option' % (
whatList[0], whatList[1], whatList[2], polarization))
fIdx = self.angleIndexArray[0]
lIdx = self.angleIndexArray[-1]
s1Selection = self.s1[fIdx:lIdx+1]
s2Selection = self.s2[fIdx:lIdx+1]
nn = len(self.angleIndexArray)
s1Matrix = (abs(np.ones((nn,nn)) * s1Selection))**2
s2Matrix = (abs(np.ones((nn,nn)) * s2Selection))**2
naturalMatrix = (.5 * s1Matrix) + (.5 * s2Matrix)
# print 's1Matrix'
# raw_input(s1Matrix)
if len(s1Selection) != nn:
raise ValueError('not possible %s %s'%(len(s1Selection),len(self.angleIndexArray)))
if polarization == "parallel":
IntMatrix = ((np.cos(self.offAngleMatrix))**2 * s2Matrix) + ((np.sin(self.offAngleMatrix))**2 * s1Matrix)
elif polarization == "perpendicular":
IntMatrix = ((np.sin(self.offAngleMatrix))**2 * s2Matrix) + ((np.cos(self.offAngleMatrix))**2 * s1Matrix)
elif polarization == "natural":
naturalMatrix[np.isnan(self.offAngleMatrix)] = 0
IntMatrix = naturalMatrix
IntMatrix[np.isnan(IntMatrix)] = 0
integratedIntensity = IntMatrix.sum()
return integratedIntensity# * stepWidth**2
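# Minimal numeric sketch of steps 1-3 in the get_detectableIntensity docstring,
# using the POPS 2 geometry (h = 7.68 mm, dm = 25 mm); these are the same
# tools.* helpers called by get_mirror_grid above. The function name is hypothetical.
def _mirror_geometry_sketch(h=7.68, dm=25.0):
    rSphere = tools.sphereRadius_fromGeometry(h, dm)  # particle-to-mirror-edge distance
    alphMax = tools.alphamax_fromGeometry(h, dm)      # maximum collectable scattering half-angle
    sSphere = tools.arc_length(rSphere, h)            # arc length cut out by the mirror
    return rSphere, alphMax, sSphere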
def plot_polar(dataList, log = False):
NUM_COLORS = len(dataList)
cm = plt.get_cmap('gist_rainbow')
cNorm = colors.Normalize(vmin=0, vmax=NUM_COLORS-1)
scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
# ax = fig.add_subplot(111)
fig, ax = plt.subplots(figsize=(12,9), subplot_kw=dict(projection='polar'))
# old way:
#ax.set_color_cycle([cm(1.*i/NUM_COLORS) for i in range(NUM_COLORS)])
# new way:
ax.set_color_cycle([scalarMap.to_rgba(i) for i in range(NUM_COLORS)])
col = ['r','b']
for e,i in enumerate(dataList):
#color = cm(1.*e/NUM_COLORS) # color will now be an RGBA tuple
if log:
ax.plot(i[0], np.log10(i[1]), linewidth=(3 - (2 * e)), color = col[e])
else:
ax.plot(i[0], i[1], linewidth=(3 - (2 * e)), color = col[e])
# ax.plot(i.xAxis, i.YNatural, linewidth=3-e)
# ax.set_rmax(0.2)
ax.grid(True)
# ax.set_title("A line plot on a polar axis", va='bottom')
if log:
ax.set_ylim((-3, 0))
plt.show()
def plot_POPS_calib(dataList, log = (0,0), title=False):
fig, ax = plt.subplots(figsize=(12,9))
NUM_COLORS = len(dataList)
cm = plt.get_cmap('gist_rainbow')
cNorm = colors.Normalize(vmin=0, vmax=NUM_COLORS-1)
scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
for e,i in enumerate(dataList):
color = cm(1.*e/NUM_COLORS) # color will now be an RGBA tuple
if i['label'] == 'gaus. broadened':
linewidth = 3
color = 'black'
else:
linewidth = 1
ax.plot(i['x'], i['y'], label = i['label'], linewidth = linewidth, color = color)
# ax.plot(i.xAxis, i.YNatural, linewidth=3-e)
# ax.set_rmax(0.2)
ax.grid(True)
if log[1]:
ax.set_yscale('log')
if log[0]:
ax.set_xscale('log')
# ax.set_title("A line plot on a polar axis", va='bottom')
# ax.set_ylim((-3, 0))
if title:
ax.set_title(title)
ax.set_xlabel('Diameter ($\mu$m)')
ax.set_ylabel('Scattering efficiency (arb. u.)')
ax.legend()
plt.show()
return fig, ax
def save(dataList, name ='test', nameAddOn = 'label',extension = '.dat'):
if not os.path.exists('output'):
os.makedirs('output')
print('new directory with name "output" created')
for e,i in enumerate(dataList):
finalName = 'output/'+name
finalName += i['label']
finalName += str(len(i['x'])) + 'pts'
finalName += extension
np.savetxt(finalName,np.array([i['x'],i['y']]).transpose())
if len(finalName) > 35:
print("Warning!!! filename is longer than 35 characters and therefore might cause trouble with Iogr!")
##############################################################################################
##############################################################################################
##############################################################################################
def default():
event = Mie()
event.set_r(.05)
event.set_wavelength(.405)
event.set_n(1.5)
event.set_nang(100)
event.calc_Natural()
print('qext: ', event.qext)
print('qsca: ', event.qsca)
print('qback: ', event.qback)
plot_polar([(event.xAxis, event.YNatural)], log = False)
def steptest():
"""Result: the programm is actually calculating the radiance -> absolut values are indipendent of number of caluclated points"""
event = Mie()
event.set_r(.500)
event.set_wavelength(.6328)
event.set_n(1.5 + 0.1j)
event.set_nang(10)
event.calc_Natural()
print('qext: ', event.qext)
print('qsca: ', event.qsca)
print('qback: ', event.qback)
print('natural: ', event.natural)
eventII = Mie()
eventII.set_r(.500)
eventII.set_wavelength(.6328)
eventII.set_n(1.5 + 0.1j)
eventII.set_nang(50)
eventII.calc_Natural()
print('qext: ', event.qext)
print('qsca: ', event.qsca)
print('qback: ', event.qback)
plot_polar([event,eventII])
def bhmie_hagen_test():
event = Mie()
eventII = Mie()
r = 1.
lamb = .405
n = 1.5 + 0.1j
noOfPts = 100
event.set_r(r)
event.set_wavelength(lamb)
event.set_n(n)
event.set_nang(noOfPts)
event.calc_Natural()
print('qext: ', event.qext)
print('qsca: ', event.qsca)
print('qback: ', event.qback)
print('natural: ', event.natural)
eventII = Mie()
eventII.set_r(r)
eventII.set_wavelength(lamb)
eventII.set_n(n)
eventII.set_nang(noOfPts)
eventII.calc_Natural_hagen()
print('qext: ', eventII.qext)
print('qsca: ', eventII.qsca)
print('qback: ', eventII.qback)
print('natural: ', eventII.natural)
plot_polar([event,eventII])
def test_comparison_to_internet():
""" this does a comparison of our calculations to that from the online calculator"""
f = open('data/mieCalculaterOutput.txt','r')
x_ref = []
y_ref_nat = []
for line in f.readlines():
if 'radius' in line:
print(line)
elif 'n_real' in line:
print(line)
elif 'size parameter' in line:
print(line)
elif 'wavelength' in line:
print(line)
elif line[0] != '#':
values = line.split()
x_ref.append(np.deg2rad(float(values[0])))
y_ref_nat.append(float(values[1]))
x_ref = np.array(x_ref)
y_ref_nat = np.array(y_ref_nat)
#y_calc = np.array(event.YNatural)
#sys.exit('ende gut alles gut')
event = Mie(wavelength = 0.6328, diameter = 1.0, indexOfRef = 1.5, nang = 100)
event.calc_Natural_hagen()
print("length", len(event.xAxis), len(x_ref))
print(" first x", event.xAxis[25], x_ref[0])
print('first y', event.YNatural[0], y_ref_nat[0])
print('max', event.YNatural.max(), y_ref_nat.max())
plot_polar([(x_ref,y_ref_nat/y_ref_nat.max()),(event.xAxis, event.YNatural/event.YNatural.max())], log = True)
#plot_polar([(x_ref,y_ref_nat/y_ref_nat.max())])
def test_calc_nintyOnly():
""" plots scattering scattering efficiency at exactly 90 deg compared to the area colected by the mirror of the POPS"""
#event_point = Mie(silent = True, wavelength = 0.6328, diameter = 1.0, indexOfRef = 1.5, nang = 100)
event_area = Mie(silent = True, design = 'POPS 1',wavelength = 0.6328, diameter = 1.0, indexOfRef = 1.5, nang = 100)
# event.POPSdimentions['mirror diameter (mm)'] = 25
# event.POPSdimentions['mirror(top)-jet distance (mm)'] = 12.25
# event.POPSdimentions['angle: jet-mirrorNormal (rad)'] = np.pi/2.
mDiameter = event_area.POPSdimensions['mirror diameter (mm)'] = 25.
rMirror = 20. # radius of the underlying sphere of the spherical mirror
event_area.POPSdimensions['mirror(top)-jet distance (mm)'] = 1000.#rMirror - tools.segment_hight(rMirror,mDiameter)
event_area.POPSdimensions['angle: jet-mirrorNormal (rad)'] = np.pi/2. # This is the mean scattering angle (90 deg)
diameters = np.logspace(-1,1,10)
#xList_point = []
yList_point = []
xList_area = []
yList_area = []
for i in diameters:
# print 'i', i
#event_point.set_d(i)
#event_point.calc_Natural_hagen()
#intensity = event_point.YNatural[int(event_point.nang)]
#xList_point.append(i)
#yList_point.append(intensity)
event_area.set_d(i)
event_area.calc_Natural_hagen()
intensity = event_area.YNatural[int(event_area.nang)]
yList_point.append(intensity)
intensity = event_area.get_detectableIntensity()
xList_area.append(i)
yList_area.append(intensity)
spect_dic_point= {'x': xList_area, 'y': yList_point, 'label': '90'}
spect_dic_area = {'x': xList_area, 'y': yList_area, 'label': 'area'}
plot_POPS_calib([spect_dic_point, spect_dic_area], log =(1,1))
#plot_POPS_calib([spect_dic_point], log =(1,1))
def calc_scatteringPattern():
event = Mie(silent = True)
# event.set_r(1.00)
event.set_wavelength(.405)
event.set_n(1.5)
event.set_nang(1000)
    event.POPSdimensions['mirror diameter (mm)'] = 25
    event.POPSdimensions['mirror(top)-jet distance (mm)'] = 12.25
    event.POPSdimensions['angle: jet-mirrorNormal (rad)'] = np.pi/2.
dataList = []
for i in np.linspace(0.05,0.75,15):
# print 'i', i
event.set_r(i)
event.calc_Natural_hagen()
scattPat = event.YNatural.copy()
xA = event.xAxis.copy()
dataList.append((xA,scattPat))
plot_polar(dataList, log = True)
def calc_intensityAsFktOfRadius_versusRuShan():
""" creates calibration plot for comparison with Ru-Shans calculations
- the aim is to get a pattern at 90 degrees and almost no area of the mirror
"""
event = Mie(silent = True)
# event.set_r(1.00)
event.set_wavelength(.405)
event.set_n(1.52)
event.set_nang(1000)
    event.POPSdimensions['mirror diameter (mm)'] = 25.4
    event.POPSdimensions['mirror(top)-jet distance (mm)'] = 10
    event.POPSdimensions['angle: jet-mirrorNormal (rad)'] = np.pi/2.
xList = []
yList = []
for i in np.linspace(0.05,1.5,5000):
# print 'i', i
event.set_r(i)
event.calc_Natural()
int = event.get_detectableIntensity()
xList.append(i)
yList.append(int)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = np.array(yList)
data['label'] = 'natural'
# plot_POPS_calib([data], log=(1,1), title = 'predicted POPS response as fkt of Particle diameter ($\lambda=$%s nm; $n=$%s)'%(event.wavelength,event.n))
save([data], name = 'POPSint_lamb%s_n%s'%(event.wavelength, event.n))
def calc_intensityAsFktOfRadius_test():
""" based on: calc_intensityAsFktOfRadius_versusRuShan
- the aim is to get a pattern at 90 degrees and almost no area of the mirror
"""
event = Mie(silent = True)
# event.set_r(1.00)
event.set_wavelength(.405)
event.set_n(1.52)
event.set_nang(1000)
event.POPSdimensions['mirror diameter (mm)'] = 1
event.POPSdimensions['mirror(top)-jet distance (mm)'] = 1000
event.POPSdimensions['angle: jet-mirrorNormal (rad)'] = np.pi/4.
xList = []
yList = []
for i in np.linspace(0.05,1.5,1000):
# print 'i', i
event.set_r(i)
event.calc_Natural_hagen()
int = event.get_detectableIntensity()
xList.append(i)
yList.append(int)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = np.array(yList)
data['label'] = 'natural'
plot_POPS_calib([data], log=(1,1), title = 'predicted POPS response as fkt of Particle diameter ($\lambda=$%s nm; $n=$%s)'%(event.wavelength,event.n))
# save([data], name = 'POPSint_lamb%s_n%s'%(event.wavelength, event.n))
def calc_intensityAsFktOfRadius_Pops_1():
""" creates calibration plot for the old pops"""
event = Mie(silent = True)
# event.set_r(1.00)
event.set_nang(1000)
    mDiameter = event.POPSdimensions['mirror diameter (mm)'] = 25.
    rMirror = 20. # radius of the spherical mirror
    event.POPSdimensions['mirror(top)-jet distance (mm)'] = rMirror - tools.segment_hight(rMirror, mDiameter)
    event.POPSdimensions['angle: jet-mirrorNormal (rad)'] = np.pi/2.
xList = []
yList = []
for i in np.linspace(0.05,1.5,50):
# print 'i', i
event.set_r(i)
event.calc_Natural()
int = event.get_detectableIntensity()
xList.append(i)
yList.append(int)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = np.array(yList)
data['label'] = 'natural'
plot_POPS_calib([data], log=(1,1), title = 'predicted POPS response as fkt of Particle diameter ($\lambda=$%s nm; $n=$%s)'%(event.wavelength,round(event.n,2)))
# save([data], name = 'POPSint_lamb%s_n%s'%(event.wavelength, round(event.n,2)))
def calc_intensityAsFktOfRadius_sensitivityOnRefIdx():
""" creates calibration plot for the old pops"""
event = Mie(silent = True, material = False)
# event.set_r(1.00)
event.set_nang(1000)
    event.POPSdimensions['mirror diameter (mm)'] = 25.4
    event.POPSdimensions['mirror(top)-jet distance (mm)'] = 10
    event.POPSdimensions['angle: jet-mirrorNormal (rad)'] = np.pi/2.
dataList = []
refIdxArray = np.linspace(1.59,1.63,5)
for n in refIdxArray:
xList = []
yList = []
event.set_n(n)
for i in np.linspace(0.05,1.5,1000):
# print 'i', i
event.set_r(i)
event.calc_Natural()
int = event.get_detectableIntensity()
xList.append(i)
yList.append(int)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = np.array(yList)
data['label'] = str(round(n,4))
dataList.append(data)
plot_POPS_calib(dataList, log=(1,1), title = 'predicted POPS response as fkt of Particle diameter ($\lambda=$%s nm; $n=$%s)'%(event.wavelength,round(event.n,2)))
save(dataList, name = 'POPSint_lamb%s_n%s'%(event.wavelength, round(event.n,3)))
def gaussian_broadening():
""" this is for PSLs"""
wavelengthArray = np.linspace(.390,.420,50)
noOfPts = np.linspace(0.05,1.5,5000)
event = Mie(silent = True, design = 'POPS 1')
event.set_nang(1000)
dataList = []
sumArray = np.zeros(len(noOfPts))
for l in wavelengthArray:
xList = []
yList = []
event.set_wavelength(l)
for i in noOfPts:
# print 'i', i
event.set_r(i)
event.calc_Natural_hagen()
int = event.get_detectableIntensity()
xList.append(i)
yList.append(int)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = np.array(yList)
data['label'] = str(round(l,5))+ '_' + str(round(event.n,4))
dataList.append(data)
gaussianScaling = tools.gauss_function(l, .405, 0.005) / 100.
print(gaussianScaling)
sumArray += gaussianScaling * data['y']
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray
data['label'] = 'gaus. broadened'
dataList.append(data)
save(dataList, name = 'gaussianBroadening_')
plot_POPS_calib(dataList, log=(1,1), title = 'predicted POPS response as fkt of Particle diameter ($\lambda=$%s nm; $n=$%s)'%(event.wavelength,round(event.n,2)))
def dioctyl_sebacate():
"""with gaussian broadening
- this is for Pops 1!!"""
wavelengthArray = np.linspace(.390,.420,50)
noOfPts = np.linspace(0.05,1.5,5000) #radius range
event = Mie(silent = True, design = 'POPS 1', indexOfRef = 1.455) # Ref. for indexOfRef. Patterson 2004
event.set_nang(1000)
dataList = []
sumArray = np.zeros(len(noOfPts))
for l in wavelengthArray:
xList = []
yList = []
event.set_wavelength(l)
for i in noOfPts:
# print 'i', i
event.set_r(i)
event.calc_Natural_hagen()
int = event.get_detectableIntensity()
xList.append(i)
yList.append(int)
data={}
data['x'] = 2 * np.array(xList) #this way we have a diameter range
data['y'] = np.array(yList)
data['label'] = str(round(l,5))+ '_' + str(round(event.n,4))
dataList.append(data)
gaussianScaling = tools.gauss_function(l, .405, 0.005) / 100.
sumArray += gaussianScaling * data['y']
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray
data['label'] = 'gaus_broadened'
dataList.append(data)
save(dataList, name = 'gaussianBroadening_')
plot_POPS_calib(dataList, log=(1,1), title = 'predicted POPS response as fkt of Dioctyl Sebacate diameter ($\lambda=$%s nm; $n=$%s)'%(event.wavelength,round(event.n,2)))
def dioctyl_sebacate_netrual_v_paraPerp():
""" - calculates not only natural, but also parallel and perpendicular
- with gaussian broadening
- this is for Pops 1!!"""
wavelengthArray = np.linspace(.390,.420,50)
noOfPts = np.logspace(np.log10(0.05),np.log10(1.5),500) #radius range
event = Mie(silent = True, design = 'POPS 1', indexOfRef = 1.455, diameter = 'dynamic') # Ref. for indexOfRef. Patterson 2004
event.set_nang(1000)
dataList = []
sumArray_natural = np.zeros(len(noOfPts))
sumArray_parallel = np.zeros(len(noOfPts))
sumArray_perpendicular = np.zeros(len(noOfPts))
for l in wavelengthArray:
xList = []
yList_natural = []
yList_parallel = []
yList_perpendicular = []
event.set_wavelength(l)
for i in noOfPts:
# print 'i', i
event.set_r(i)
# event.calc_Natural_hagen()
xList.append(i)
int = event.get_detectableIntensity("natural")
yList_natural.append(int)
int = event.get_detectableIntensity("parallel")
yList_parallel.append(int)
int = event.get_detectableIntensity("perpendicular")
yList_perpendicular.append(int)
# data={}
# data['x'] = 2 * np.array(xList) #this way we have a diameter range
# data['y'] = np.array(yList)
# data['label'] = str(round(l,5))+ '_' + str(round(event.n,4))
# dataList.append(data)
gaussianScaling = tools.gauss_function(l, .405, 0.005) / 100.
sumArray_natural += gaussianScaling * np.array(yList_natural)
sumArray_parallel += gaussianScaling * np.array(yList_parallel)
sumArray_perpendicular += gaussianScaling * np.array(yList_perpendicular)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray_natural
data['label'] = 'natural'
dataList.append(data)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray_parallel
data['label'] = 'parallel'
dataList.append(data)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray_perpendicular
data['label'] = 'perpendicular'
dataList.append(data)
save(dataList, name = '')
plot_POPS_calib(dataList, log=(1,1), title = 'predicted POPS response as fkt of Dioctyl Sebacate diameter ($\lambda=$%s nm; $n=$%s)'%(event.wavelength,round(event.n,2)))
def dioctyl_sebacate_netrual_v_paraPerp_exactAngleDependence():
""" - calculates not only natural, but also parallel and perpendicular
- with gaussian broadening
- this is for Pops 1!!"""
wavelengthArray = np.linspace(.400,.410,15)
noOfPts = np.logspace(np.log10(0.05),np.log10(1.5),30) #radius range
event = Mie(silent = True, design = 'POPS 2', indexOfRef = 1.455, diameter = 'dynamic') # Ref. for indexOfRef. Patterson 2004
event.set_nang(20)
dataList = []
sumArray_natural = np.zeros(len(noOfPts))
sumArray_parallel = np.zeros(len(noOfPts))
sumArray_perpendicular = np.zeros(len(noOfPts))
for e,l in enumerate(wavelengthArray):
print('start wavelength %s of %s' % (e, len(wavelengthArray)))
xList = []
yList_natural = []
yList_parallel = []
yList_perpendicular = []
event.set_wavelength(l)
for i in noOfPts:
# print 'start a point'
# print 'i', i
event.set_r(i)
# event.calc_Natural_hagen()
xList.append(i)
int = event.get_detectableIntensity("natural")
yList_natural.append(int)
int = event.get_detectableIntensity("parallel")
yList_parallel.append(int)
int = event.get_detectableIntensity("perpendicular")
yList_perpendicular.append(int)
# data={}
# data['x'] = 2 * np.array(xList) #this way we have a diameter range
# data['y'] = np.array(yList)
# data['label'] = str(round(l,5))+ '_' + str(round(event.n,4))
# dataList.append(data)
gaussianScaling = tools.gauss_function(l, .405, 0.005) / 100.
sumArray_natural += gaussianScaling * np.array(yList_natural)
sumArray_parallel += gaussianScaling * np.array(yList_parallel)
sumArray_perpendicular += gaussianScaling * np.array(yList_perpendicular)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray_natural
data['label'] = 'natural'
dataList.append(data)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray_parallel
data['label'] = 'parallel'
dataList.append(data)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray_perpendicular
data['label'] = 'perpendicular'
dataList.append(data)
save(dataList, name = '')
plot_POPS_calib(dataList, log=(1,1), title = 'predicted POPS response as fkt of Dioctyl Sebacate diameter ($\lambda=$%s nm; $n=$%s)'%(event.wavelength,round(event.n,2)))
def dioctyl_sebacate_variousIndices():
""" - calculates not only natural, but also parallel and perpendicular
- with gaussian broadening
- this is for Pops 1!!"""
wavelengthArray = np.linspace(.390,.420,30)
noOfPts = np.logspace(np.log10(0.05),np.log10(1.5),200) #radius range
event = Mie(silent = True, design = 'POPS 2', indexOfRef = 1.455, diameter = 'dynamic') # Ref. for indexOfRef. Patterson 2004
event.set_nang(100)
dataList = []
sumArray_natural = np.zeros(len(noOfPts))
sumArray_parallel = np.zeros(len(noOfPts))
sumArray_perpendicular = np.zeros(len(noOfPts))
for e,l in enumerate(wavelengthArray):
print('start wavelength %s of %s' % (e, len(wavelengthArray)))
xList = []
yList_natural = []
yList_parallel = []
yList_perpendicular = []
event.set_wavelength(l)
for i in noOfPts:
# print 'start a point'
# print 'i', i
event.set_r(i)
# event.calc_Natural_hagen()
xList.append(i)
int = event.get_detectableIntensity("natural")
yList_natural.append(int)
int = event.get_detectableIntensity("parallel")
yList_parallel.append(int)
int = event.get_detectableIntensity("perpendicular")
yList_perpendicular.append(int)
# data={}
# data['x'] = 2 * np.array(xList) #this way we have a diameter range
# data['y'] = np.array(yList)
# data['label'] = str(round(l,5))+ '_' + str(round(event.n,4))
# dataList.append(data)
gaussianScaling = tools.gauss_function(l, .405, 0.005) / 100.
sumArray_natural += gaussianScaling * np.array(yList_natural)
sumArray_parallel += gaussianScaling * np.array(yList_parallel)
sumArray_perpendicular += gaussianScaling * np.array(yList_perpendicular)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray_natural
data['label'] = 'natural'
dataList.append(data)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray_parallel
data['label'] = 'parallel'
dataList.append(data)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray_perpendicular
data['label'] = 'perpendicular'
dataList.append(data)
save(dataList, name = '')
plot_POPS_calib(dataList, log=(1,1), title = 'predicted POPS response as fkt of Dioctyl Sebacate diameter ($\lambda=$%s nm; $n=$%s)'%(event.wavelength,round(event.n,2)))
def different_MirrorDistance():
""" - calculates not only natural, but also parallel and perpendicular
- with gaussian broadening
- this is for Pops 1!!"""
wavelengthArray = np.linspace(.400,.410,15)
noOfPts = np.logspace(np.log10(0.05),np.log10(1.5),500) #radius range
event = Mie(silent = True, design = 'POPS 2', indexOfRef = 1.455, diameter = 'dynamic') # Ref. for indexOfRef. Patterson 2004
    event.set_nang(100) # number of scattering angles
event.POPSdimensions['mirror(top)-jet distance (mm)'] = 7.68#+2.159
dataList = []
sumArray_natural = np.zeros(len(noOfPts))
sumArray_parallel = np.zeros(len(noOfPts))
sumArray_perpendicular = np.zeros(len(noOfPts))
for e,l in enumerate(wavelengthArray):
print('start wavelength %s of %s' % (e, len(wavelengthArray)))
xList = []
yList_natural = []
yList_parallel = []
yList_perpendicular = []
event.set_wavelength(l)
for i in noOfPts:
# print 'start a point'
# print 'i', i
event.set_r(i)
# event.calc_Natural_hagen()
xList.append(i)
int = event.get_detectableIntensity("natural")
yList_natural.append(int)
int = event.get_detectableIntensity("parallel")
yList_parallel.append(int)
int = event.get_detectableIntensity("perpendicular")
yList_perpendicular.append(int)
# data={}
# data['x'] = 2 * np.array(xList) #this way we have a diameter range
# data['y'] = np.array(yList)
# data['label'] = str(round(l,5))+ '_' + str(round(event.n,4))
# dataList.append(data)
gaussianScaling = tools.gauss_function(l, .405, 0.005) / 100.
sumArray_natural += gaussianScaling * np.array(yList_natural)
sumArray_parallel += gaussianScaling * np.array(yList_parallel)
sumArray_perpendicular += gaussianScaling * np.array(yList_perpendicular)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray_natural
data['label'] = 'natural'
dataList.append(data)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray_parallel
data['label'] = 'parallel'
dataList.append(data)
data={}
data['x'] = 2 * np.array(xList)
data['y'] = sumArray_perpendicular
data['label'] = 'perpendicular'
dataList.append(data)
save(dataList, name = '14_04_18_differentMirrorDistances_%s'%event.POPSdimensions['mirror(top)-jet distance (mm)'])
plot_POPS_calib(dataList, log=(1,1), title = 'Different mirror distances')# ($\lambda=$%s nm; $n=$%s)'%(event.wavelength,round(event.n,2)))
return dataList
def wavelengthDependence(nm):
dRange = np.logspace(np.log10(0.05),np.log10(1.5),200) #radius range
event = Mie(silent = True, design = 'POPS 2', indexOfRef = 1.455, diameter = 'dynamic') # Ref. for indexOfRef. Patterson 2004
    event.set_nang(100) # number of scattering angles
event.POPSdimensions['mirror(top)-jet distance (mm)'] = 7.68#+2.159
dataList = []
# sumArray_natural = np.zeros(len(noOfPts))
# sumArray_parallel = np.zeros(len(noOfPts))
# sumArray_perpendicular = np.zeros(len(noOfPts))
# for e,l in enumerate(wavelengthArray):
# print 'start wavelength %s of %s'%(e, len(wavelengthArray))
# xList = []
# yList_natural = []
# yList_parallel = []
# yList_perpendicular = []
#nm = 405
event.set_wavelength(nm)
perpInt = []
for i in dRange:
# print 'start a point'
# print 'i', i
event.set_r(i)
# event.calc_Natural_hagen()
int = event.get_detectableIntensity("perpendicular")
perpInt.append(int)
data={}
data['x'] = 2 * np.array(dRange)
data['y'] = perpInt
data['label'] = '%s nm'%nm
dataList.append(data)
save(dataList, name = '14_05_14_nmDependence_IOR1.455')
plot_POPS_calib(dataList, log=(1,1), title = 'Different mirror distances')# ($\lambda=$%s nm; $n=$%s)'%(event.wavelength,round(event.n,2)))
return dataList
def DOSversusNHSO(IOR):
"""dioctyl sebacate(1.455) versus Ammonium sulfate (1.40)"""
dRange = np.logspace(np.log10(0.05),np.log10(1.5),200) #radius range
event = Mie(silent = True, design = 'POPS 2', indexOfRef = IOR, diameter = 'dynamic') # Ref. for indexOfRef. Patterson 2004
    event.set_nang(100) # number of scattering angles
event.POPSdimensions['mirror(top)-jet distance (mm)'] = 7.68#+2.159
dataList = []
# sumArray_natural = np.zeros(len(noOfPts))
# sumArray_parallel = np.zeros(len(noOfPts))
# sumArray_perpendicular = np.zeros(len(noOfPts))
# for e,l in enumerate(wavelengthArray):
# print 'start wavelength %s of %s'%(e, len(wavelengthArray))
# xList = []
# yList_natural = []
# yList_parallel = []
# yList_perpendicular = []
nm = .405
event.set_wavelength(nm)
perpInt = []
for i in dRange:
# print 'start a point'
# print 'i', i
event.set_r(i)
# event.calc_Natural_hagen()
int = event.get_detectableIntensity("perpendicular")
perpInt.append(int)
data={}
data['x'] = 2 * np.array(dRange)
data['y'] = perpInt
data['label'] = '%s ior'%IOR
dataList.append(data)
save(dataList, name = '14_05_16_IOR_dipendenc_405nm')
plot_POPS_calib(dataList, log=(1,1), title = 'Different mirror distances')# ($\lambda=$%s nm; $n=$%s)'%(event.wavelength,round(event.n,2)))
return dataList
##################################################################################################################################################################
##################################################################################################################################################################
if __name__ == "__main__":
print('los gehts')
# test_comparison_to_internet()
# test_calc_nintyOnly()
# default()
# bhmie_hagen_test()
# calc_intensityAsFktOfRadius_sensitivityOnRefIdx()
# calc_scatteringPattern()
# calc_nintyOnly()
# gaussian_broadening()
# dioctyl_sebacate()
# dioctyl_sebacate_netrual_v_paraPerp()
# calc_intensityAsFktOfRadius_test()
# dioctyl_sebacate_netrual_v_paraPerp_exactAngleDependence()
# dioctyl_sebacate_variousIndices()
|
mit
|
elkingtonmcb/scikit-learn
|
examples/classification/plot_lda.py
|
70
|
2413
|
"""
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
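# For example, generate_data(n_train, 5) returns X with shape (n_train, 5) where
# only the first column separates the two blobs; the remaining four columns are
# pure noise, which is what makes shrinkage increasingly helpful below.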
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
|
bsd-3-clause
|
ClimbsRocks/scikit-learn
|
sklearn/ensemble/tests/test_gradient_boosting.py
|
43
|
39945
|
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
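# (make_hastie_10_2 draws 10 standard-normal features and labels a sample
# y = 1 when sum_j x_j**2 exceeds 9.34, the chi-squared(10) median, else -1.)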
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# 'Bagging Predictors'. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
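# (Friedman #1: y = 10*sin(pi*x0*x1) + 20*(x2 - 0.5)**2 + 10*x3 + 5*x4 + noise,
# with five additional uninformative features.)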
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
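# Illustrative sketch (not part of the original test suite): the deprecated
# ``transform`` call exercised above maps onto the SelectFromModel
# meta-transformer. The helper below assumes SelectFromModel's
# ``prefit=True`` / ``threshold="mean"`` options and is not collected by nose.
def _example_select_from_model():
    from sklearn.feature_selection import SelectFromModel
    est = GradientBoostingRegressor(n_estimators=100, max_depth=5,
                                    min_samples_split=2, random_state=1)
    est.fit(boston.data, boston.target)
    # Keep only the features whose importance exceeds the mean importance,
    # mirroring the ``threshold="mean"`` behaviour of the deprecated call.
    selector = SelectFromModel(est, threshold="mean", prefit=True)
    X_reduced = selector.transform(boston.data)
    assert X_reduced.shape[1] < boston.data.shape[1]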
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
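# Summary of the resolution checked above: 'auto' means sqrt(n_features) for
# the classifier but all features for the regressor, 'sqrt' and 'log2' are
# taken literally, and a float is treated as a fraction of n_features with a
# minimum of one feature.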
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with y passed as a 2-D column vector of shape (n_samples, 1).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise; otherwise the warning gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
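# oob_improvement_[i] records the improvement in loss on the out-of-bag
# samples at stage i; it is only populated when subsample < 1.0.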
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one line per iteration for iterations 1-10, then one line every
# 10 iterations (20, 30, ..., 100) -> 10 + 9 lines
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
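# Warm-start pattern exercised above: fit with a smaller n_estimators, raise
# n_estimators via set_params and fit again; only the additional stages are
# trained and the final model matches a single cold fit.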
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
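# The monitor callable is invoked after every stage as
# monitor(stage_index, estimator, locals_of_fit_stages); returning True stops
# training, so this monitor halts the fit after 10 stages (indices 0-9).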
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
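# For the exponential loss the class-1 probability is recovered from the
# decision function F(x) via 1 / (1 + exp(-2 * F(x))), which is exactly the
# relation asserted above.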
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
if EstimatorClass is GradientBoostingClassifier:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
|
bsd-3-clause
|
ergosimulation/mpslib
|
scikit-mps/examples/mpslib_entropy.py
|
1
|
1622
|
'''
mpslib_entropy:
Compute the self-information of each realization and then the entropy.
'''
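# Note (assumption): with do_entropy enabled, each run reports an entropy
# estimate in O.H and its spread across realizations in O.Hstd; those are the
# attributes collected and plotted below.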
#%%
import mpslib as mps
import matplotlib.pyplot as plt
import numpy as np
import copy
plt.ion()
#%% Initialize the MPS object, using a specific algorithm (def='mps_snesim_tree')
O=mps.mpslib(method='mps_genesim')
#%% Set the number of conditioning data and the simulation grid size
O.par['n_cond']=9
O.par['simulation_grid_size'][0]=38
O.par['simulation_grid_size'][1]=23
O.par['simulation_grid_size'][2]=1
O.par['hard_data_fnam']='hard.dat'
#%% Set hard data
d_hard = np.array([[ 3, 3, 0, 1],
[ 8, 8, 0, 0],
[ 12, 3, 0, 1]])
O.d_hard = d_hard
# Set training image
O.ti = mps.trainingimages.strebelle(di=3, coarse3d=1)[0]
#O.plot_ti()
#%% Run MPSlib in simulation mode
O.parameter_filename='mps.txt'
O.par['do_entropy']=1
O.par['n_real']=10
O.par['n_max_cpdf_count']=10 # We need ENESIM/GENESIM and not DS
O.par['n_max_ite']=1000000
O.remove_gslib_after_simulation=0
O.delete_local_files() # to make sure no old data are floating around
#%%
Onc=[]
H=[]
Hstd=[]
n_cond_arr=[0,1,2,4,8,16]
i=-1
for n_cond in n_cond_arr:
i=i+1
print(n_cond)
Onc.append(copy.deepcopy(O))
Onc[i].par['n_cond']=n_cond
Onc[i].run()
H.append(Onc[i].H)
Hstd.append(Onc[i].Hstd)
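# One would typically expect the entropy estimate to decrease as more
# conditioning data (n_cond) are used; the plots below are meant to show that
# trend together with its spread across realizations.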
plt.figure()
plt.subplot(211)
plt.semilogy(n_cond_arr,H,'-*')
plt.xlabel('n_cond')
plt.ylabel('Entropy')
plt.subplot(212)
plt.errorbar(n_cond_arr, H, Hstd)
plt.yscale('log')
plt.xlabel('n_cond')
plt.ylabel('Entropy')
|
lgpl-3.0
|