| repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
gmatteo/pymatgen | pymatgen/command_line/tests/test_vampire_caller.py | 5 | 2353 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import warnings
import pandas as pd
from monty.os.path import which
import pymatgen.command_line.vampire_caller as vampirecaller
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "magnetic_orderings")
@unittest.skipIf(not which("vampire-serial"), "vampire executable not present")
class VampireCallerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("Testing with: ", which("vampire-serial"))
cls.Mn3Al = pd.read_json(os.path.join(test_dir, "Mn3Al.json"))
cls.compounds = [cls.Mn3Al]
cls.structure_inputs = []
cls.energy_inputs = []
for c in cls.compounds:
ordered_structures = list(c["structure"])
ordered_structures = [Structure.from_dict(d) for d in ordered_structures]
epa = list(c["energy_per_atom"])
energies = [e * len(s) for (e, s) in zip(epa, ordered_structures)]
cls.structure_inputs.append(ordered_structures)
cls.energy_inputs.append(energies)
def setUp(self):
pass
def tearDown(self):
warnings.simplefilter("default")
def test_vampire(self):
for structs, energies in zip(self.structure_inputs, self.energy_inputs):
settings = {"start_t": 0, "end_t": 500, "temp_increment": 50}
vc = vampirecaller.VampireCaller(
structs,
energies,
mc_box_size=3.0,
equil_timesteps=1000, # 1000
mc_timesteps=2000, # 2000
user_input_settings=settings,
)
voutput = vc.output
critical_temp = voutput.critical_temp
self.assertAlmostEqual(400, critical_temp, delta=100)
if os.path.exists("Mn3Al.mat"):
os.remove("Mn3Al.mat")
if os.path.exists("Mn3Al.ucf"):
os.remove("Mn3Al.ucf")
if os.path.exists("input"):
os.remove("input")
if os.path.exists("log"):
os.remove("log")
if os.path.exists("output"):
os.remove("output")
if __name__ == "__main__":
unittest.main()
| mit |
JamesMura/elections | apollo/submissions/models.py | 1 | 22019 | # coding: utf-8
from ..core import db
from ..deployments.models import Deployment, Event
from ..formsframework.models import Form
from ..formsframework.parser import Comparator, Evaluator
from ..helpers import compute_location_path
from ..locations.models import Location
from ..participants.models import Participant
from ..users.models import User
from datetime import datetime
from flask.ext.babel import lazy_gettext as _
from flask.ext.mongoengine import BaseQuerySet
from lxml import etree
from mongoengine import Q
from pandas import DataFrame, isnull, Series
import numpy as np
FLAG_STATUSES = {
'no_problem': ('0', _('No Problem')),
'problem': ('2', _('Problem')),
'serious_problem': ('3', _('Serious Problem')),
'verified': ('4', _('Verified')),
'rejected': ('5', _('Rejected'))
}
FLAG_CHOICES = (
('0', _('No Problem')),
('2', _('Problem')),
('3', _('Serious Problem')),
('4', _('Verified')),
('5', _('Rejected'))
)
STATUS_CHOICES = (
('', _(u'Status')),
('0', _(u'Status — No Problem')),
('2', _(u'Status — Unverified')),
('4', _(u'Status — Verified')),
('5', _(u'Status — Rejected'))
)
class SubmissionQuerySet(BaseQuerySet):
# most of the fields below are DBRef fields or not useful to
# our particular use case.
DEFAULT_EXCLUDED_FIELDS = [
'id', 'created', 'updated', 'location', 'deployment'
]
SUBDOCUMENT_FIELDS = ['location_name_path', 'completion']
def filter_in(self, location_spec):
"""Given a single `class`Location instance, or an iterable of
`class`Location instances, this method restricts submissions to
those whose location either exactly match the passed in location(s),
or those whose location are lower than the passed in location(s) in
the location hierarchy.
The multiple location case is merely an extension of the single case.
"""
if isinstance(location_spec, Location):
# checking for a single location
location = location_spec
param = 'location_name_path__{}'.format(location.location_type)
query_kwargs = {
param: location.name
}
return self(Q(location=location) | Q(**query_kwargs))
elif hasattr(location_spec, '__iter__'):
# checking for multiple locations
chain = Q()
for location in location_spec:
if not isinstance(location, Location):
return self.none()
param = 'location_name_path__{}'.format(location.location_type)
query_kwargs = {param: location.name}
chain = Q(location=location) | Q(**query_kwargs) | chain
return self(chain)
# value is neither a Location instance nor an iterable
# producing Location instances
return self.none()
def to_dataframe(self, selected_fields=None, excluded_fields=None):
if excluded_fields:
qs = self.exclude(*excluded_fields)
else:
qs = self.exclude(*self.DEFAULT_EXCLUDED_FIELDS)
if selected_fields:
qs = self.only(*selected_fields)
df = DataFrame(list(qs.as_pymongo())).convert_objects(
convert_numeric=True)
if df.empty:
return df
# add fields with no values
fields = filter(
lambda f: f not in df.columns,
map(lambda field: field.name, [
field for group in self.first().form.groups
for field in group.fields]))
for field in fields:
df[field] = Series(np.nan, index=df.index)
# do cleanup of subdocument fields
for field in self.SUBDOCUMENT_FIELDS:
temp = df.pop(field).tolist()
temp2 = [i if not isnull(i) else {} for i in temp]
df = df.join(DataFrame(temp2))
return df
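# A brief usage sketch for the queryset above (the objects and field codes are
# hypothetical; assumes this class is wired up as Submission.objects via the
# `meta` declaration further below):
#
#   region = Location.objects.get(name='Northern Region')
#   qs = Submission.objects.filter_in(region)            # region and anything below it
#   df = qs.to_dataframe(selected_fields=['AA', 'AB'])   # hypothetical field names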
# Submissions
class Submission(db.DynamicDocument):
'''Submissions represent data collected by participants in response to
questions in Checklist and Critical Incident forms. Submissions are created
prior to data input and are updated whenever data is received. The only
exception is for the storage of Critical Incident reports which create
submissions when data input is received.
The :class:`core.documents.Submission` model
is a :class:`mongoengine.DynamicDocument` and hence most of its
functionality isn't stored within the model and gets defined at run time
depending on the configuration of forms, form groups and form fields.
:attr:`updated` is a :class:`mongoengine.db.DateTimeField` that stores the
last time the submission was updated.
:attr:`created` is a :class:`mongoengine.db.DateTimeField` that stores the
date of creation for the submission.
:attr:`contributor` stores the contributing participant for
this submission.
:attr:`form` provides a reference to the form that the submission was
made for.
:attr:`completion` is a dictionary whose keys are the names of the
groups defined on :attr:`form` (if any) and whose values (as at the
time of writing this) are Complete, Partial or Missing, based on whether all
fields within that group have been filled out. Please see the method
_update_completion_status() for details.
:attr:`location_name_path` is a dictionary with location type names as
keys and actual location names as values. Since MongoDB doesn't support
joins, it was created to be a precomputed shortcut to using the location
hierarchy in queries and views without needing several database lookups.
:attr:`sender_verified` is set to True if the sender of a submission has
been verified. For instance, if it was received from a known phone number
in the case of SMS, or a known user account (either via the UI or API).
:attr:`quality_checks` stores the precomputed value of different logical
checks (created at runtime, so we can't predict in advance). An example
logical check would be: for this submission, was the total number of votes
greater than the total number of registered voters?
:attr:`verification_status` stores the overall result of all the logical
checks: are there problems with this data, or is everything ok, or not
enough data to have an opinion? see STATUS_CHOICES for the full list of
possible values.
IMPORTANT: submissions for incident forms get a few more dynamic fields:
- status: whether the incident was confirmed/rejected etc
- witness: whether the contributor actually witnessed the incident,
whether it was reported by a third party, etc.
'''
SUBMISSION_TYPES = (
('O', _(u'Observer Submission')),
('M', _(u'Master Submission')),
)
form = db.ReferenceField(Form)
contributor = db.ReferenceField(Participant)
location = db.ReferenceField(Location)
created = db.DateTimeField()
updated = db.DateTimeField()
completion = db.DictField()
location_name_path = db.DictField()
sender_verified = db.BooleanField(default=True)
quality_checks = db.DictField()
confidence = db.DictField()
verification_status = db.StringField()
overridden_fields = db.ListField(db.StringField())
submission_type = db.StringField(
choices=SUBMISSION_TYPES, default='O', required=True)
deployment = db.ReferenceField(Deployment)
event = db.ReferenceField(Event)
meta = {
'queryset_class': SubmissionQuerySet,
}
def _update_completion_status(self):
'''Computes the completion status of each form group for a submission.
Should be called automatically on save, preferably in the `clean`
method.'''
if self.master != self:
for group in self.form.groups:
completed = [getattr(self.master, f.name, None) is not None
for f in group.fields]
if all(completed):
self.completion[group.name] = 'Complete'
elif any(completed):
self.completion[group.name] = 'Partial'
else:
self.completion[group.name] = 'Missing'
elif self.master == self:
# update sibling submissions
for submission in self.siblings:
for group in self.form.groups:
completed = [getattr(self, f.name, None) is not None
for f in group.fields]
if all(completed):
submission.completion[group.name] = 'Complete'
elif any(completed):
submission.completion[group.name] = 'Partial'
else:
submission.completion[group.name] = 'Missing'
submission.save(clean=False)
def _update_confidence(self):
'''Computes the confidence score for the fields in the master.
Should be called automatically on save, preferably in the `clean`
method.'''
if self.submission_type == 'M':
for group in self.form.groups:
for field in group.fields:
score = None
name = field.name
# if the field has been overridden then we trust the
# data manager/clerk and we have 100% confidence in the
# data
if name in self.overridden_fields:
score = 1
self.confidence[name] = score
continue
values = [getattr(submission, name, None)
for submission in self.siblings]
unique = list(set(values))
# if all values were reported and are the same then
# we have 100% confidence in the reported data
if (
values and len(unique) == 1
and unique[0] is not None
):
score = 1
# if no values were reported then we resort to the default
elif (
values and len(unique) == 1
and unique[0] is None
):
score = None
else:
# filter out only reported values
n_values = filter(lambda v: v is not None, values)
n_unique = list(set(n_values))
# if there are different reported values then our score
# is zero (we have no confidence in the data)
if (len(n_unique) > 1):
score = 0
# we compute the score based on the reported over
# the total expected
else:
score = len(n_values) / float(len(values))
self.confidence[name] = score
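# Illustration of the scoring rule above on hypothetical sibling reports
# for a single field (None meaning "not reported"):
#   [5, 5, 5]    -> all reported and equal -> confidence 1
#   [None, None] -> nothing reported       -> confidence None (fall back to default)
#   [5, 4, 5]    -> conflicting reports    -> confidence 0
#   [5, 5, None] -> two of three agree     -> confidence 2/3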
def _update_data_fields(self):
'''This little utility sets any falsy boolean fields to None.
Boolean fields holding the value False have been found to
cause problems in analysis.'''
fields = [
field for group in self.form.groups
for field in group.fields]
boolean_fields = [
field for field in fields if field.represents_boolean]
single_value_fields = [
field for field in fields
if field.options is not None and
field.allows_multiple_values is False]
for field in boolean_fields:
if not getattr(self, field.name, None):
# dictionary-style access will fail. it's a MongoEngine issue
# self[field.name] = None
setattr(self, field.name, None)
for field in single_value_fields:
value = getattr(self, field.name, '')
if value == '':
setattr(self, field.name, None)
elif isinstance(value, int):
setattr(self, field.name, value)
elif isinstance(value, str) and value.isdigit():
setattr(self, field.name, int(value))
def _update_master(self):
'''TODO: update master based on agreed algorithm'''
master = self.master
if master and master != self:
# fetch only fields that have not been overridden
fields = filter(lambda f: f.name not in master.overridden_fields, [
field for group in self.form.groups
for field in group.fields])
for field in fields:
if (
getattr(self, field.name, None) != getattr(
master, field.name, None)
):
setattr(
master, field.name, getattr(self, field.name, None))
master._compute_verification()
master._update_confidence()
master.updated = datetime.utcnow()
master.save(clean=False)
def _compute_verification(self):
'''Precomputes the logical checks on the submission.'''
if self.submission_type != 'M':
# only for master submissions
return
verified_flag = FLAG_STATUSES['verified'][0]
rejected_flag = FLAG_STATUSES['rejected'][0]
comparator = Comparator()
NO_DATA = 0
OK = 1
UNOK = 2
flags_statuses = []
for flag in self.form.quality_checks:
evaluator = Evaluator(self)
try:
lvalue = evaluator.eval(flag['lvalue'])
rvalue = evaluator.eval(flag['rvalue'])
# the comparator setting expresses the relationship between
# lvalue and rvalue
if flag['comparator'] == 'pctdiff':
# percentage difference between lvalue and rvalue
try:
diff = abs(lvalue - rvalue) / float(
max([lvalue, rvalue]))
except ZeroDivisionError:
diff = 0
elif flag['comparator'] == 'pct':
# absolute percentage
try:
diff = float(lvalue) / float(rvalue)
except ZeroDivisionError:
diff = 0
else:
# value-based comparator
diff = abs(lvalue - rvalue)
# evaluate conditions and set flag appropriately
if comparator.eval(flag['okay'], diff):
flag_setting = FLAG_STATUSES['no_problem'][0]
flags_statuses.append(OK)
elif comparator.eval(flag['serious'], diff):
flag_setting = FLAG_STATUSES['serious_problem'][0]
flags_statuses.append(UNOK)
elif comparator.eval(flag['problem'], diff):
flag_setting = FLAG_STATUSES['problem'][0]
flags_statuses.append(UNOK)
else:
# if we have no way of determining, we assume it's okay
flag_setting = FLAG_STATUSES['no_problem'][0]
flags_statuses.append(OK)
# setattr(self, flag['storage'], flag_setting)
self.quality_checks[flag['storage']] = flag_setting
except TypeError:
# no sufficient data
# setattr(self, flag['storage'], None)
try:
self.quality_checks.pop(flag['storage'])
except KeyError:
pass
flags_statuses.append(NO_DATA)
# compare all flags and depending on the values, set the status
if self.verification_status not in [verified_flag, rejected_flag]:
if all(map(lambda i: i == NO_DATA, flags_statuses)):
self.verification_status = None
elif any(map(lambda i: i == UNOK, flags_statuses)):
self.verification_status = FLAG_STATUSES['problem'][0]
elif any(map(lambda i: i == OK, flags_statuses)):
self.verification_status = FLAG_STATUSES['no_problem'][0]
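# Illustration of the three comparator modes above (the totals are hypothetical):
# with lvalue = 950 votes cast and rvalue = 1000 registered voters,
#   'pctdiff' gives abs(950 - 1000) / 1000.0 = 0.05
#   'pct'     gives 950.0 / 1000.0           = 0.95
#   otherwise gives abs(950 - 1000)          = 50
# and the flag's 'okay'/'problem'/'serious' conditions are then evaluated
# against that diff to pick the stored flag setting.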
def clean(self):
# update location name path if it does not exist.
# unlike for participants, submissions aren't 'mobile', that is,
# there doesn't seem to be a use case for transferring submissions.
# at least, not at the time of writing
if not self.location_name_path:
self.location_name_path = compute_location_path(self.location)
# cleanup data fields
self._update_data_fields()
# update the master submission
self._update_master()
# update completion status
self._update_completion_status()
# and compute the verification
self._compute_verification()
# update the confidence
self._update_confidence()
# update the `updated` timestamp
self.updated = datetime.utcnow()
@property
def master(self):
# a master submission is its own master
if self.submission_type == 'M':
return self
if not hasattr(self, '_master'):
if self.form.form_type == 'INCIDENT':
self._master = None
return self._master
try:
self._master = Submission.objects.get(
form=self.form,
location=self.location,
created__gte=self.created.combine(self.created,
self.created.min.time()),
created__lte=self.created.combine(self.created,
self.created.max.time()),
submission_type='M',
deployment=self.deployment,
event=self.event
)
except self.DoesNotExist:
self._master = Submission.objects.create(
form=self.form,
location=self.location,
created=self.created,
submission_type='M',
deployment=self.deployment,
event=self.event
)
return self._master
@property
def siblings(self):
if not hasattr(self, '_siblings'):
if self.pk:
self._siblings = Submission.objects(
form=self.form,
location=self.location,
created__gte=self.created.combine(self.created,
self.created.min.time()),
created__lte=self.created.combine(self.created,
self.created.max.time()),
submission_type='O', # exclude master
deployment=self.deployment,
event=self.event,
pk__ne=self.pk
)
else:
self._siblings = Submission.objects(
form=self.form,
location=self.location,
created__gte=self.created.combine(self.created,
self.created.min.time()),
created__lte=self.created.combine(self.created,
self.created.max.time()),
submission_type='O', # exclude master
deployment=self.deployment,
event=self.event
)
return self._siblings
@property
def versions(self):
return SubmissionVersion.objects(submission=self)
@property
def comments(self):
return SubmissionComment.objects(submission=self)
def verify_phone(self, number):
latest_sms_version = self.versions.filter(channel='SMS').first()
if not latest_sms_version:
return False
if latest_sms_version.identity == number:
return True
return False
def to_xml(self):
document = self.form.to_xml()
data = document.xpath('//model/instance/data')[0]
data.set('id', unicode(self.id))
for tag in self.form.tags:
value = getattr(self, tag, None)
value = '' if value is None else unicode(value)
element = data.xpath('//{}'.format(tag))[0]
element.text = value
return document
class SubmissionComment(db.Document):
'''Stores user comments.'''
submission = db.ReferenceField(Submission)
user = db.ReferenceField(User)
comment = db.StringField()
submit_date = db.DateTimeField(default=datetime.utcnow)
deployment = db.ReferenceField(Deployment)
class SubmissionVersion(db.Document):
'''Stores versions of :class:`core.documents.Submission`
instances'''
CHANNEL_CHOICES = (
('SMS', _('SMS')),
('WEB', _('Web')),
('API', _('API'))
)
submission = db.ReferenceField(Submission, required=True)
data = db.StringField(required=True)
timestamp = db.DateTimeField(default=datetime.utcnow)
channel = db.StringField(choices=CHANNEL_CHOICES, required=True)
identity = db.StringField(default='unknown', required=True)
deployment = db.ReferenceField(Deployment)
meta = {
'ordering': ['-timestamp']
}
@property
def version_number(self):
versions = list(SubmissionVersion.objects(
deployment=self.deployment,
submission=self.submission
).scalar('id'))
return versions.index(self.id) + 1
| gpl-3.0 |
cmscaltech/fdtcp | hdfs/intervals_distribution.py | 2 | 4094 | #!/usr/bin/env python
"""
Script for plot on occurrence study of
"AlreadyBeingCreatedException" in Hadoop DFS.
#40 https://trac.hep.caltech.edu/trac/fdtcp/ticket/40
#39 https://trac.hep.caltech.edu/trac/fdtcp/ticket/39 (parent ticket)
AlreadyBeingCreated-log_file_names - list of the separate per-transfer log
file names in which this exception occurred (61 cases during
2011-04-12--06h:43m to 2011-04-14--10h:46m (~52h))
details on #5:comment:20
AlreadyBeingCreated-timestamps - just timestamps extracted
the timestamps are the times when the transfer was initiated
on the fdtcp side, not exactly when the exception occurred, but
for studying occurrence dependence this should be close enough.
"""
from __future__ import print_function
from __future__ import division
from past.utils import old_div
import time
import sys
import datetime
import numpy
import pylab
from matplotlib.dates import date2num
import matplotlib as mpl
"""
help
a = [10, 20, 22, 24, 25]
b = [1.2, 1, 0.9, 1.3, 1.9]
pylab.plot(a) # generates default x data
pylab.plot(b)
pylab.plot(a, b, 'rs', a, b, 'k')
"""
BIN_SIZE = 4
# the longest gap between consecutive exceptions was over 5h, so
# make the entire period 6h (360 mins)
ENTIRE_PERIOD = 360
class PlotData(object):
"""
PlotData - time bins are BIN_SIZE minutes bins into which fall
exception events offsets from the previous occurrence.
"""
def __init__(self):
self.timeBins = []
self.x = [] # what will be plotted - number of minutes bins on X axis
self.y = [] # what will be plotted - number of occurrences in the time bin
# make bins of BIN_SIZE up ENTIRE_PERIOD
pd = PlotData()
for i in range(BIN_SIZE, ENTIRE_PERIOD, BIN_SIZE):
hour = old_div(i, 60)
min = i - (hour * 60)
t = datetime.time(hour, min)
pd.timeBins.append(t)
pd.x.append(i)
pd.y.append(0)
# reference time for calculating time delta, time difference
refDelta = datetime.time(0, BIN_SIZE)
datetimes = [] # on x axis
for dt in open("AlreadyBeingCreated-timestamps", 'r'):
dt = dt.strip()
dt = dt.split('-')
dt = [int(c) for c in dt]
dObj = datetime.datetime(*dt)
delta = None
# can only calculate delta in the second iteration
if len(datetimes) != 0:
delta = dObj - previous
previous = dObj
datetimes.append(date2num(dObj))
# can't do anything on the first iteration
if not delta:
continue
# delta is in form 0:18:51.515249
sDelta = str(delta).split(':')
iDelta = [int(c) for c in (sDelta[0], sDelta[1])]
deltaMin = (60 * iDelta[0]) + iDelta[1]
for i in range(len(pd.timeBins)):
calc = abs(deltaMin - pd.x[i])
# "deltaMin in range(4/2)" makes the first bin since the subtraction
# will still be larger than the half size of the bin ...
if calc <= old_div(BIN_SIZE, 2) or deltaMin in range(old_div(BIN_SIZE, 2)):
pd.y[i] += 1
#print ("%s falls into %s (occup:%s)" % (delta, pd.x[i], pd.y[i]))
break
else:
print("not binned: %s %s" % (delta, deltaMin))
print(pd.y)
t = 0
for c in pd.y:
t += c
print ("number of total occurrences: %s (must be the same as number of "
"lines in the input file - 1)" % t)
# process result lists - consider only those that have occurrence > 0
toPlotX = []
toPlotY = []
for i in range(len(pd.y)):
if pd.y[i] > 0:
toPlotX.append(pd.x[i])
toPlotY.append(pd.y[i])
print("###### to plot:")
print(toPlotX)
print(toPlotY)
pylab.setp(
pylab.gca().get_xticklabels(),
rotation=45,
horizontalalignment='right')
pylab.plot(toPlotX, toPlotY, 'rs')
pylab.xlabel(
"%s [min] time offset bins (time from previous occurrence)" %
BIN_SIZE)
pylab.ylabel("number of occurrences with corresponding time offset")
pylab.title("AlreadyBeingCreated HDFS exceptions time offset occurrences")
pylab.grid(True)
# saves plot into a png file
# pylab.savefig('simple_plot')
#pylab.subplots_adjust(left=0.3, bottom=0.3)
# ggpylab.subplots_adjust(bottom=0.18)
pylab.show()
| apache-2.0 |
elkingtonmcb/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
joshloyal/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
orlox/massive_bins_2015 | 2016_ULX/scripts/lumdist/lum_dist.py | 1 | 9966 | #!/usr/bin/env python
import numpy as np
from pylab import *
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
import matplotlib.patches as patches
from scipy.interpolate import griddata
import math
import scipy
from matplotlib import ticker
import sys
import os
import mmap
import itertools
import matplotlib as mpl
import matplotlib.gridspec as grd
from matplotlib.colors import LogNorm
params = {'backend': 'pdf',
'figure.figsize': [4.3, 10],
'font.family':'serif',
'font.size':10,
'font.serif': 'Times Roman',
'axes.titlesize': 'medium',
'axes.labelsize': 'medium',
'legend.fontsize': 8,
'legend.frameon' : False,
'text.usetex': True,
'figure.dpi': 600,
'lines.markersize': 2,
'lines.linewidth': 3,
'lines.antialiased': False,
'path.simplify': False,
'legend.handlelength':3,
'figure.subplot.bottom':0.05,
'figure.subplot.top':0.92,
'figure.subplot.left':0.15,
'figure.subplot.right':0.95}
mpl.rcParams.update(params)
clight = 3e10
cgrav = 6.67e-8
msun = 2e33
Lsun = 3.9e33
WHITE = (1.00,1.00,1.00)
BLACK = (0.00,0.00,0.00)
ORANGE = (0.90,0.60,0.00)
SKY_BLUE = (0.35,0.70,0.90)
BLUE_GREEN = (0.00,0.60,0.50)
YELLOW = (0.95,0.90,0.25)
BLUE = (0.00,0.45,0.70)
VERMILLION = (0.80,0.40,0.00)
RED_PURPLE = (0.80,0.60,0.70)
#hexcols[0] dark bluish
#hexcols[1] light blue
#hexcols[2] greenish
#hexcols[3] dark green
#hexcols[4] brownish
#hexcols[5] light brown
#hexcols[6] pinkish
#hexcols[7] dark something redish
#hexcols[8] magentish
hexcols = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77',\
'#CC6677', '#882255', '#AA4499', '#661100', '#6699CC', '#AA4466','#4477AA']
def clamp(val, minimum=0, maximum=255):
if val < minimum:
return minimum
if val > maximum:
return maximum
return val
def colorscale(hexstr, scalefactor):
"""
Scales a hex string by ``scalefactor``. Returns scaled hex string.
To darken the color, use a float value between 0 and 1.
To brighten the color, use a float value greater than 1.
>>> colorscale("#DF3C3C", .5)
#6F1E1E
>>> colorscale("#52D24F", 1.6)
#83FF7E
>>> colorscale("#4F75D2", 1)
#4F75D2
"""
hexstr = hexstr.strip('#')
if scalefactor < 0 or len(hexstr) != 6:
return hexstr
r, g, b = int(hexstr[:2], 16), int(hexstr[2:4], 16), int(hexstr[4:], 16)
r = clamp(r * scalefactor)
g = clamp(g * scalefactor)
b = clamp(b * scalefactor)
return "#%02x%02x%02x" % (r, g, b)
range_integral = 0.0194213341446518*(2.56+0.301)
limited_range = 0.004105169206845768*(0.5+0.301)
lums_X = [\
2.3,
2.1,
6.7,
10.4,
24.8,
31.1,
2.1,
6.9,
11.0,
36.0,
2.4,
6.0,
3.0,
3.7,
5.4,
1.5,
25.7,
1.9,
32.9,
2.4,
5.2,
2.8,
3.6,
5.2,
7.6,
2.7,
5.4,
11.9,
2.6,
14.4,
1.7,
2.1,
1.6,
5.3,
5.5,
1.5,
19.8,
6.3,
6.5,
2.9,
3.5,
8.3,
10.4,
17.9,
3.9,
16.8,
1.8,
3.6,
21.8,
1.2,
1.7,
1.2,
1.2,
3.1,
30.4,
1.4,
12.2,
3.3,
3.5,
6.9,
2.2,
2.1,
1.3,
3.9,
1.0,
3.1,
14.4,
2.4,
14.2,
2.5,
34.0,
3.1,
2.0,
3.3,
5.7,
4.5,
3.2,
3.7,
2.4]
lums_cts = [
0.8,
1.9,
3.9,
4.7,
4.2,
19.9,
2.4,
1.7,
3.4,
2.3,
1.3,
5.7,
1.1,
1.1,
6.6,
2.5,
1.0,
1.7,
5.0,
1.1,
4.2,
6.0,
1.1,
2.8,
2.5,
0.6,
1.7,
0.3,
0.6,
0.4,
0.2,
2.3,
2.6,
1.2,
1.6,
1.4,
1.9,
4.6,
1.2,
0.9,
1.0,
14.1,
1.7,
11.4,
3.5,
1.4,
1.6,
1.1,
1.8,
5.6,
10.8,
1.4,
1.2,
2.5,
2.4,
0.8,
0.9,
7.3,
2.2,
1.7,
1.8,
3.2,
21.4,
2.6,
14.4,
2.5,
1.4,
1.1,
3.2,
1.2,
1.3,
1.7,
1.4,
1.5,
2.7,
2.1,
8.3,
0.5,
2.0,
3.4,
0.9,
0.7,
1.7,
1.2,
0.8,
2.6,
1.1,
2.6,
4.7,
2.4,
8.3,
1.6,
0.7,
0.6,
0.3,
6.8,
0.4,
1.1,
6.5,
1.5,
1.0,
1.5,
1.7]
Lbins_obs = 10**np.linspace(0,np.log10(50),6)
hist_X, bin_edges = np.histogram(lums_X, bins=Lbins_obs)
hist_X = hist_X.astype(float)
for k in range(len(hist_X)):
hist_X[k] = float(hist_X[k]) / (np.log10(Lbins_obs[k+1])-np.log10(Lbins_obs[k]))/np.log(10)
hist_X = hist_X / np.max(hist_X)
left,right = bin_edges[:-1],bin_edges[1:]
X_X = np.array([left,right]).T.flatten()
Y_X = np.array([hist_X,hist_X]).T.flatten()
X_X = np.append(X_X,X_X[-1])
Y_X = np.append(Y_X, 0)
hist_cts, bin_edges = np.histogram(lums_cts, bins=Lbins_obs)
hist_cts = hist_cts.astype(float)
for k in range(len(hist_cts)):
hist_cts[k] = float(hist_cts[k]) / (np.log10(Lbins_obs[k+1])-np.log10(Lbins_obs[k]))/np.log(10)
hist_cts = hist_cts / np.max(hist_cts)
left,right = bin_edges[:-1],bin_edges[1:]
X_cts = np.array([left,right]).T.flatten()
Y_cts = np.array([hist_cts,hist_cts]).T.flatten()
X_cts = np.append(X_cts,X_cts[-1])
Y_cts = np.append(Y_cts,0)
Lgrimm = np.linspace(0.1,20)
Ygrimm = 3.3*np.power(10*Lgrimm,-0.61)
Lgrimm = np.append(Lgrimm, Lgrimm[-1])
Ygrimm = np.append(Ygrimm, 0)
#fig, axes= plt.subplots(1)
gs = grd.GridSpec(8, 1, wspace=0.0, hspace=0.0)
folders = ["-2.500","-3.000","-3.500","-4.000","-4.500","-5.000","-5.500","-6.000"]
#folders = ["-2.500"]
qratios = ["0.050","0.100","0.150","0.200","0.250","0.300","0.350","0.400","0.450","0.500","0.550","0.600"]
for i, folder in enumerate(folders):
axes = plt.subplot(gs[i])
luminosities = []
spins = []
superedd = []
bh_masses = []
donor_masses = []
weights = []
for qratio in qratios:
file_name = "mt_data_"+folder+"_"+qratio+".dat"
print(file_name)
try:
data = np.loadtxt(file_name, skiprows=2, unpack = True)
if data.size == 0: continue
except:
print("cant open file, or no data", file_name)
continue
luminosities.append(data[11])
spins.append(data[12])
superedd.append(data[10]-data[9])
bh_masses.append(data[6])
donor_masses.append(data[7])
weights.append(data[4]*data[8])
luminosities = np.power(10,np.concatenate(luminosities))*Lsun/1e39
spins = np.concatenate(spins)
superedd = np.concatenate(superedd)
bh_masses = np.log10(np.concatenate(bh_masses))
donor_masses = np.log10(np.concatenate(donor_masses))
weights = np.concatenate(weights)
sum_weights = np.sum(weights)
sum_weights_low = np.sum(weights[bh_masses<np.log10(100)])
sum_weights_high = np.sum(weights[bh_masses>np.log10(100)])
#weights = weights/sum_weights
print("sources per SFR:", sum_weights*0.01/3)
print("sources per SFR low:", sum_weights_low*0.01/3)
print("sources per SFR high:", sum_weights_high*0.01/3)
Lbins = 10**np.linspace(-1,3,40)
hist, bin_edges = np.histogram(luminosities, bins=Lbins, weights = weights*0.01/3)
for k in range(len(hist)):
hist[k] = hist[k] / (np.log10(Lbins[k+1])-np.log10(Lbins[k]))/np.log(10)
left,right = bin_edges[:-1],bin_edges[1:]
X_Z = np.array([left,right]).T.flatten()
Y_Z = np.array([hist,hist]).T.flatten()
scale_array = np.power(10,\
np.minimum(np.maximum(superedd,0),np.log10(3)))
hist2, bin_edges = np.histogram(luminosities*scale_array, bins=Lbins, weights = weights*0.01/3)
for k in range(len(hist)):
hist2[k] = hist2[k] / (np.log10(Lbins[k+1])-np.log10(Lbins[k]))/np.log(10)
left,right = bin_edges[:-1],bin_edges[1:]
X_Z2 = np.array([left,right]).T.flatten()
Y_Z2 = np.array([hist2,hist2]).T.flatten()
axes.fill_between(X_Z,1e-5*np.ones(len(Y_Z)),Y_Z, color=hexcols[6], alpha = 0.4, label="This work", linewidth = 3)
axes.fill_between(X_Z2,1e-5*np.ones(len(Y_Z2)),Y_Z2, linestyle=":", color=hexcols[6], alpha = 0.2, label="This work ($3\\times \dot{M}_{\\rm Edd}$)", linewidth = 3)
axes.plot(X_X, Y_X, color=hexcols[1], label="Swartz et al. (2011), $L_{\\rm X}$")
axes.plot(X_cts, Y_cts, color =hexcols[3], label="Swartz et al. (2011), $L_{\\rm cnt}$")
axes.plot(Lgrimm, Ygrimm, color =hexcols[0], label="Grimm et al. (2003)", linestyle="--")
if i==0:
axes.legend(loc='upper left', bbox_to_anchor=(-0.15, 1.7),
ncol=2, scatterpoints=1,prop={'size':9})
axes.set_yscale("log")
axes.set_xscale("log")
axes.set_ylim(0.02,20)
axes.set_xlim(0.5,200)
axes.set_ylabel("$L\\times dN/dL$")
axes.text(40,4,"$\\log Z="+folder[:4]+"$")
if i!= 7:
axes.set_xticklabels([])
axes.set_xlabel("$L\\;[10^{39}\\;\\rm erg\\; s^{-1}]$")
plt.savefig("plots/lum_dist.pdf", dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False)
| gpl-3.0 |
mattea/mattea-utils | matteautils/match/vectorsim.py | 1 | 17233 | #!/usr/bin/python
from __future__ import division
import math
import signal
import sys
import numpy as np
from scipy.spatial import distance
from munkres import munkres
from . import Matcher
from itertools import izip
from scipy.stats import kendalltau
from matteautils.base import printd
import matteautils.config as conf
from sklearn.neighbors import kneighbors_graph
from scipy.sparse.csgraph import minimum_spanning_tree
from skidmarks import wald_wolfowitz
def munkres_handler(signum, frame):
printd("Can't keep waiting...")
print frame
raise Exception("ran out of time...")
#a = np.array([[.1]*5,[.2]*5,[.3]*5])
#b = np.array([[.1]*5,[.2,.2,.2,.2,.3],[0,0,0,0,1]])
#c = 1 - cdist(a,b,'cosine')
def multiCdist(s1, s2, metric):
if s1.ndims == 1:
return metric(s1["vector"], s2["vector"])
else:
return sum([metric(x, y) * a for x, y, a in izip(s1["vector"], s2["vector"], conf.alphas)])
def getMetric(metric):
if isinstance(metric, basestring):
if metric in locals():
return locals()[metric]
else:
return lambda x, y: distance.cdist(x["vector"], y["vector"], metric=metric)
else:
return metric
# Lots of possible fns:
# http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.spatial.distance.cdist.html
def pairdist(s1, s2, distfn=getMetric('cosine')):
if type(s1[0]) == np.ndarray:
if len(s1) == 0 or len(s2) == 0:
return np.ndarray(0)
return distfn(s1, s2)
else:
return distfn(s1["vector"], s2["vector"])
#return distfn([x.vector for x in s1], [x.vector for x in s2])
class VecSim(Matcher):
def __init__(self, df=None, metric='cosine'):
self.metric = getMetric(metric)
@classmethod
def normalize(self, vec, df=None):
return vec, np.sum(vec, axis=0)
def match(self, pair):
self.pair = pair
self.s1 = pair.s1
self.s2 = pair.s2
self.tsim = 1 - self.metric([self.s1["vec_sum"]], [self.s2["vec_sum"]])[0, 0]
self.nmatches = -1
self.start = -1
self.end = len(self.s2["vector"]) - 1
return self.tsim
class MinDistSim(Matcher):
def __init__(self, df=None, metric='cosine', maxsent=20, ngram=1, recurse=False, dimfeatures=True):
#self.dist = ndist(s1, s2)
#s1 = s1["vector"]
#s2 = s2["vector"]
self.metric = getMetric(metric)
self._names = ["MDS_" + x for x in ["tsim", "lsim", "kdist", "kldist", "ldist", "kt", "tmax", "tmin", "tsum", "tstd", "tmaxidf", "tsumidf"]]
maxsent = maxsent - ngram + 1
if dimfeatures:
self._names.extend(["MDS_w%03d" % x for x in range(maxsent)])
self.maxsent = maxsent
self.ngram = ngram
self.recurse = recurse
self.vocab = df
self.wordcount = df.total
self.dimfeatures = dimfeatures
def match(self, pair):
s1l = len(pair.s1["vector"])
s2l = len(pair.s2["vector"])
self.tsim = float('-9999')
self.lsim = float('-9999')
self.minlen = min(s1l, s2l)
self.maxlen = max(s1l, s2l)
self.nmatches = 0
self.start = -1
self.end = -1
if (self.minlen == 0 or
self.maxlen >= 100):
return self.tsim
# For simplicity in later code, make the shorter one first
if s1l < s2l:
self.s1 = pair.s1
self.s2 = pair.s2
s1l = len(pair.s1["vector"])
s2l = len(pair.s2["vector"])
else:
self.s1 = pair.s2
self.s2 = pair.s1
wc = self.wordcount
if "wv_idfs" not in self.s1:
self.s1["wv_idfs"] = [math.log(wc / self.vocab[x], 2) for x in self.s1["wv_tokens"]]
if "wv_idfs" not in self.s2:
self.s2["wv_idfs"] = [math.log(wc / self.vocab[x], 2) for x in self.s2["wv_tokens"]]
if self.ngram > 1:
ng = self.ngram
v1 = self.s1["vector"]
v2 = self.s2["vector"]
t1 = self.s1["wv_tokens"]
t2 = self.s2["wv_tokens"]
#idf1 = self.s1["wv_idfs"]
#idf2 = self.s2["wv_idfs"]
weights1 = self.s1["weights"]
weights2 = self.s2["weights"]
nv1 = [sum(v1[i:i + ng]) for i in range(max(1, len(v1) - ng + 1))]
nv2 = [sum(v2[i:i + ng]) for i in range(max(1, len(v2) - ng + 1))]
nt1 = ["_".join(t1[i:i + ng]) for i in range(max(1, len(t1) - ng + 1))]
nt2 = ["_".join(t2[i:i + ng]) for i in range(max(1, len(t2) - ng + 1))]
#nidf1 = [max(idf1[i:i + ng]) for i in range(max(1, len(idf1) - ng + 1))]
#nidf2 = [max(idf2[i:i + ng]) for i in range(max(1, len(idf2) - ng + 1))]
nweights1 = [max(weights1[i:i + ng]) for i in range(max(1, len(weights1) - ng + 1))]
nweights2 = [max(weights2[i:i + ng]) for i in range(max(1, len(weights2) - ng + 1))]
#self.s1 = {"vector": nv1, "wv_tokens": nt1, "wv_idfs": nidf1}
#self.s2 = {"vector": nv2, "wv_tokens": nt2, "wv_idfs": nidf2}
self.s1 = {"vector": nv1, "wv_tokens": nt1, "weights": nweights1}
self.s2 = {"vector": nv2, "wv_tokens": nt2, "weights": nweights2}
self.minlen = max(self.minlen - ng + 1, 1)
self.maxlen = max(self.maxlen - ng + 1, 1)
self.dists = [1] * self.minlen
self.pair = pair
#self.dist = pairdist(self.s1["vector"], self.s2["vector"], fn=self.metric)
#self.dist = pairdist(self.s1, self.s2, fn=self.metric)
dist = self.metric(self.s1, self.s2)
# scale by max of idf
#for i in range(dist.shape[0]):
# for j in range(dist.shape[1]):
# dist[i][j] *= max(self.s1["wv_idfs"][i], self.s2["wv_idfs"][j])
self.matchv = np.zeros(dist.shape, int)
np.fill_diagonal(self.matchv, 1)
if np.sum(dist) == 0:
self.tsim = 1
self.nmatches = min(dist.shape)
self.start = 0
self.end = dist.shape[1] - 1
return self.tsim
if (dist == dist[0]).all():
self.tsim = 1 - sum(dist[0])
self.nmatches = min(dist.shape)
self.start = 0
self.end = dist.shape[1] - 1
return self.tsim
if (dist.T == dist[:, 0]).all():
self.tsim = 1 - sum(dist[:, 0])
self.nmatches = min(dist.shape)
self.start = 0
self.end = dist.shape[1] - 1
return self.tsim
signal.signal(signal.SIGALRM, munkres_handler)
signal.alarm(10)
try:
matches = munkres(dist)
except Exception, e:
printd(e)
printd("dist: " + dist.shape)
printd(dist)
return self.tsim
signal.alarm(0)
self.matchv = matches
tdist = 0
tmaxidf = 0
tsumidf = 0
nmatches = 0
mstart = dist.shape[1]
mend = 0
#print self.s1["text"]
#print self.s2["text"]
#print " ".join(self.s1["wv_tokens"])
#print " ".join(self.s2["wv_tokens"])
s1tok = self.s1["wv_tokens"]
s2tok = self.s2["wv_tokens"]
matcharr = [0] * matches.shape[0]
dists = [0] * matches.shape[0]
matchedy = [0] * matches.shape[1]
for i in range(matches.shape[0]):
for j in range(matches.shape[1]):
if matches[i, j]:
matchedy[j] = 1
tdist += dist[i, j]
#tmaxidf += dist[i, j] * max(self.s1["wv_idfs"][i], self.s2["wv_idfs"][j])
#tsumidf += dist[i, j] * sum((self.s1["wv_idfs"][i], self.s2["wv_idfs"][j]))
wi = self.s1["weights"][i]
wj = self.s2["weights"][j]
tmaxidf += dist[i, j] * max(wi, wj)
tsumidf += dist[i, j] * sum((wi, wj))
printd("%s\t%s\t%0.4f\t%0.4f\t%0.4f" % (s1tok[i], s2tok[j], dist[i, j], wi, wj), level=1, sock=sys.stdout)
nmatches += 1
matcharr[i] = j
dists[i] = dist[i, j]
if j < mstart:
mstart = j
if j > mend:
mend = j
ldist = tdist
tdist = tdist * max(dist.shape) / pow(min(dist.shape), 2)
tmaxidf = tmaxidf * max(dist.shape) / pow(min(dist.shape), 2)
tsumidf = tsumidf * max(dist.shape) / pow(min(dist.shape), 2)
kt, ktp = kendalltau(range(len(matcharr)), matcharr)
printd("Score: %0.4f\t%0.4f\t%0.4f\tLabel: %g\n" % (tdist, tmaxidf, tsumidf, pair.label), level=1, sock=sys.stdout)
if self.recurse:
# Remove matches from dist array, and rerun munkres
# Repeat until dist array is empty
pass
else:
for i in range(matches.shape[1]):
if not matchedy[i]:
ldist += min(matches[:, i])
ldist /= max(dist.shape)
# TODO:
# Dist penalty is at most beta
# The problem with this is that there may be a better pairing between the two sentences
# if you optimize for mindist with dist penalty.
# Also could add a weight to each pairing like IDF, most important for the
# summing, but a different sum would again potentially affect the optimal
# match.
beta = 1
self.kdist = tdist * (1 + beta * (kt + 1) / 2)
self.kldist = ldist * (1 + beta * (kt + 1) / 2)
self.ldist = ldist
#print "Score: %g" % tsim
#print "Label: %g" % self.pair.label
self.tsim = 1 - tdist
self.tmaxidf = tmaxidf
self.tsumidf = tsumidf
self.nmatches = nmatches
self.start = mstart
self.end = mend
self.kt = kt
self.dists = sorted(dists, reverse=True)
self.lsim = tdist + (max(dists) * (self.maxlen - self.minlen))
self.tmax = max(dists)
self.tmin = min(dists)
self.tsum = sum(dists)
self.tstd = np.std(dists)
return self.tsim
def features(self):
fs = [self.tsim, self.lsim, self.kdist, self.kldist, self.ldist, self.kt, self.tmax, self.tmin, self.tsum, self.tstd, self.tmaxidf, self.tsumidf]
if self.dimfeatures:
distarr = [0] * self.maxsent
dists = self.dists
distarr[0:len(dists)] = dists
fs += distarr
return fs
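# A minimal, self-contained sketch of the min-cost word alignment that
# MinDistSim.match computes with munkres above. For illustration it uses
# SciPy's linear_sum_assignment instead of the `munkres` binding, and toy
# 2-d vectors that are not part of the original module.
def _mindist_alignment_sketch():
    from scipy.optimize import linear_sum_assignment
    toy_s1 = np.array([[1.0, 0.0], [0.7, 0.7]])               # "sentence 1": 2 words
    toy_s2 = np.array([[0.9, 0.1], [0.0, 1.0], [0.6, 0.8]])   # "sentence 2": 3 words
    toy_dist = distance.cdist(toy_s1, toy_s2, metric='cosine')
    rows, cols = linear_sum_assignment(toy_dist)  # optimal one-to-one pairing
    # total matched distance, scaled as in match(): * max_len / min_len ** 2
    tdist = toy_dist[rows, cols].sum() * max(toy_dist.shape) / pow(min(toy_dist.shape), 2)
    return zip(rows, cols), tdist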
class InfSim(Matcher):
#def __init__(self, df, metric='cosine'):
# #self.metric = getMetric(metric)
# self.df = df
# self._vectorsums = dict()
def __init__(self, data, wordvec, metric='cosine', dimfeatures=False):
self.df = wordvec.logdf(data.wv_vocab())
data.normalize(self, self.df)
self._vectorsums = dict()
@classmethod
def normalize(cls, s, df):
if len(s) == 0:
return s, 0
if np.any(np.isnan(df)):
printd("Hmm, nan for df %0.4f")
printd("df:\n" + str(df))
sys.exit(1)
# TODO: This should be a weighted sum with IDF
# As a result of this sum, different length sentences naturally receive a
# penalty, as the sum is naturally larger than the min.
# Also, we aren't looking at euclidean distance, so we may be losing out on scale information
# But if we did, longer sentences would be harder to match together (as distances would compound).
# Maybe should divide by min sentence length or something of the sort...
# This is an average, not a sum, which
# probably causes all sorts of weirdness
ps = np.sum(s, axis=0) / np.sum(s)
if np.any(np.isnan(ps)):
printd("Hmm, nan for ps %0.4f" % np.sum(s))
printd("ps:\n" + str(ps))
printd("s:\n" + str(s))
printd("df:\n" + str(df))
sys.exit(1)
ts = np.sum(np.multiply(ps, df))
if ts == 0:
printd("Hmm, 0 for ts")
printd("ps:\n" + str(ps))
printd("df:\n" + str(df))
sys.exit(1)
return ps, ts
def match(self, pair):
self.pair = pair
self.s1 = pair.s1["vector"]
self.s2 = pair.s2["vector"]
self.ts1 = pair.s1["vector_sum"]
self.ts2 = pair.s2["vector_sum"]
return self.domatch()
def domatch(self):
self.nmatches = -1
if self.ts1 == 0 or self.ts2 == 0:
self.tsim = 0.0
self.start = -1
self.end = -1
return self.tsim
self.tsim = 2 * sum([min(s1i, s2i) * dfi for s1i, s2i, dfi in izip(self.s1, self.s2, self.df)]) / (self.ts1 + self.ts2)
self.start = -1
self.end = len(self.s2) - 1
return self.tsim
def pairwisedist(self, s1, s2):
#Must create the "vector" and "vector_sum" for each word, rather than for each sentence
dists = np.zeros((len(s1["wv_tokens"]), len(s2["wv_tokens"])))
for wi1, (w1, v1) in enumerate(izip(s1["wv_tokens"], s1["vector"])):
for wi2, (w2, v2) in enumerate(izip(s2["wv_tokens"], s2["vector"])):
self.s1 = v1
self.s2 = v2
self.ts1 = self.vectorsum(w1, v1)
self.ts2 = self.vectorsum(w2, v2)
dists[wi1, wi2] = 1 - self.domatch()
#TODO could multiply by term based on wi1/wi2 (distance penalty)...
if dists[wi1, wi2] < 0:
dists[wi1, wi2] = 0
#printd("Hmm, negative dist %g" % dists[wi1, wi2])
# Annoying rounding errors, e.g. -2.22045e-16
return dists
def vectorsum(self, word, wv):
if word not in self._vectorsums:
self._vectorsums[word] = np.sum(np.multiply(wv, self.df))
return self._vectorsums[word]
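# The similarity computed by domatch above, restated on toy values (the
# numbers are hypothetical). With p1, p2 the normalized sentence vectors and
# d the per-dimension log-df weights:
#   sim = 2 * sum_i(min(p1[i], p2[i]) * d[i]) / (sum_i(p1[i] * d[i]) + sum_i(p2[i] * d[i]))
def _infsim_sketch():
    d = np.array([1.0, 2.0, 0.5])    # toy information weights (logdf)
    p1 = np.array([0.5, 0.3, 0.2])   # toy normalized sentence 1
    p2 = np.array([0.4, 0.4, 0.2])   # toy normalized sentence 2
    ts1 = np.sum(p1 * d)
    ts2 = np.sum(p2 * d)
    return 2 * np.sum(np.minimum(p1, p2) * d) / (ts1 + ts2)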
class InfRankSim(Matcher):
def __init__(self, data, wordvec, df=None, metric='cosine', dimfeatures=True):
self._vectorsums = dict()
self.vocabvec = self.sortwords(data.wv_vocab(), wordvec)
self._names = ["IRS_" + x for x in ["asim", "mdim", "msent"]]
if dimfeatures:
self._names.extend(["IRS_d%03d" % x for x in range(wordvec.size)])
self.dimfeatures = dimfeatures
def sortwords(self, vocab, wordvec):
vvecs = [list() for _ in range(wordvec.size)]
ftot = 0
for t, tc in vocab.iteritems():
try:
tv = wordvec[t]
ftot += tc
for d in range(len(tv)):
vvecs[d].append((tv[d], t))
except KeyError:
pass
lookupvecs = [dict() for _ in range(wordvec.size)]
for d, vvec in enumerate(vvecs):
vvec.sort()
cdf = 0
#vtot = len(vvec)
#vitm = 1 / vtot
lookup = lookupvecs[d]
for tv, t in vvec:
# Should the CDF be based on TF? or just on the word existence?
cdf += tv / ftot
#cdf += vitm
lookup[t] = cdf
return lookupvecs
def match(self, pair):
wvlen = len(pair.s1["vector"][0])
m = len(pair.s1["wv_tokens"])
n = len(pair.s2["wv_tokens"])
self._features = []
# Take the average of all distances
asim = 0
mdim = 0
msent = 0
for d in range(wvlen):
mindimp = 1
for t1 in pair.s1["wv_tokens"]:
minsentp = 1
for t2 in pair.s2["wv_tokens"]:
p = abs(self.vocabvec[d][t1] - self.vocabvec[d][t2])
asim += simP(p)
if p < mindimp:
mindimp = p
if p < minsentp:
minsentp = p
# Take the minimum across one sentences
msent += simP(minsentp)
# Take the minimum across dimensions
mdim += simP(mindimp)
asim /= m * n * wvlen
self._features.append(asim)
self._features.append(mdim / wvlen)
msent /= n * wvlen
self._features.append(msent)
if self.dimfeatures:
for d in range(wvlen):
combvec = ([(self.vocabvec[d][t1], 0) for t1 in pair.s1["wv_tokens"]] +
[(self.vocabvec[d][t2], 1) for t2 in pair.s1["wv_tokens"]])
combvec.sort()
combval = multicdf(combvec)
self._features.append(simP(combval))
self.tsim = asim
self.start = 0
self.end = m
def simP(p):
if p == 0:
return 1
elif p == 1:
return 0
else:
return 1 / (1 + (1 / (math.log(1 / p, 2))))
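# For reference: simP(0) == 1, simP(1) == 0, simP(0.5) == 0.5, and
# simP(0.25) == 1 / (1 + 1 / 2.0) == 2 / 3, so smaller CDF gaps map to
# higher similarity.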
# TODO: Should add slack, so not all values must match. Would require a lot
# more bookkeeping
def multicdf(vec):
lastvs = [[], []]
cdf = 0
for v, senti in vec:
vs = lastvs[senti]
ovs = lastvs[senti - 1]
if len(ovs) != 0:
if len(vs) == 0:
cdf += v - ovs[0]
del ovs[:]
else:
back = None
forward = None
prevv = vs[0]
# If expecting large set, could do binary search...
for ov in ovs:
if (ov - prevv) < (v - ov):
back = ov
else:
forward = ov
break
if back is not None:
cdf += back - prevv
if forward is not None:
cdf += v - forward
del vs[:]
del ovs[:]
vs.append(v)
#if lasti is not None:
# cdf += (v - lastv)
# if senti != lasti:
# lasti = None
#else:
# lasti = senti
#lastv = v
return cdf
def ndist(s1, s2, fn='cosine'):
rd = []
for w1 in s1:
rr = []
for w2 in s2:
rr.append(w1.dist(w2, fn=fn))
rd.append(rr)
return np.array(rd)
# Wald-Wolfowitz test
# Adapted from:
# Monaco, John V.
# "Classification and authentication of one-dimensional behavioral biometrics."
# Biometrics (IJCB), 2014 IEEE International Joint Conference on. IEEE, 2014.
# https://gist.github.com/vmonaco/e9ff0ac61fcb3b1b60ba
class WWSim(Matcher):
def __init__(self, wordvec, df=None, metric='cosine', k=10, dimfeatures=True):
self.k = 10
self._names = ["WWS_base"]
if dimfeatures:
self._names.extend(["WWS_%03d" % x for x in range(wordvec.size)])
self.dimfeatures = dimfeatures
def match(self, pair):
self.pair = pair
v1 = pair.s1["vector"]
v2 = pair.s2["vector"]
m = len(v1)
n = len(v2)
N = m + n
k = min(N - 1, self.k)
if m == 0 or n == 0 or np.linalg.norm(v1) == 0 or np.linalg.norm(v2) == 0:
return 0
vs = np.concatenate((v1, v2))
g = kneighbors_graph(vs, mode='distance', n_neighbors=k)
mst = minimum_spanning_tree(g, overwrite=True)
edges = np.array(mst.nonzero()).T
labels = np.array([0] * m + [1] * n)
c = labels[edges]
runs_edges = edges[c[:, 0] == c[:, 1]]
# number of runs is the total number of observations minus edges within each run
R = N - len(runs_edges)
# expected value of R
e_R = ((2.0 * m * n) / N) + 1
# variance of R is _numer/_denom
v = 2 * m * n * (2 * m * n - N) / (N ** 2 * (N - 1))
# see Eq. 1 in Friedman 1979
# W approaches a standard normal distribution
W = (R - e_R) / np.sqrt(v)
self.tsim = -1 if np.isnan(W) else W
bydim = []
for d in range(len(v1[0])):
sorteddim = np.argsort(vs[:, d])
wd = wald_wolfowitz(labels[sorteddim])
bydim.append(wd['z'])
self._features = [self.tsim]
if self.dimfeatures:
self._features += bydim
return self.tsim
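# A self-contained sketch of the classic one-dimensional runs statistic that
# the multivariate (MST-based) test above generalizes; the label sequence in
# the example is hypothetical.
def _runs_statistic_sketch(labels):
    labels = np.asarray(labels)
    m = np.sum(labels == 0)
    n = np.sum(labels == 1)
    N = m + n
    R = 1 + np.sum(labels[1:] != labels[:-1])           # number of runs
    e_R = ((2.0 * m * n) / N) + 1                        # expected number of runs
    v = 2.0 * m * n * (2 * m * n - N) / (N ** 2 * (N - 1))
    return (R - e_R) / np.sqrt(v)                        # approx. standard normal

# e.g. _runs_statistic_sketch([0, 0, 1, 0, 1, 1, 0, 1]) is about 0.76, i.e. no
# evidence that the two samples separate into long single-label runs.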
class PairFeatures(Matcher):
def __init__(self, dimfeatures=True):
self.dimfeatures = dimfeatures
self._names = ["PF_" + x for x in ["Len", "TextLen", "TextDiff"]]
def match(self, pair):
fs = []
self._features = fs
self.tsim = abs(len(pair.s1["vector"]) - len(pair.s2["vector"]))
fs.append(self.tsim)
fs.append(abs(len(pair.s1["text"]) - len(pair.s2["text"])))
fs.append(abs(len(pair.s1["text"]) + len(pair.s2["text"]) - len(" ".join(pair.s1["wv_tokens"])) - len(" ".join(pair.s2["wv_tokens"]))))
return self.tsim
| apache-2.0 |
yunque/sms-tools | lectures/03-Fourier-properties/plots-code/convolution-1.py | 24 | 1341 | import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
from scipy.fftpack import fft, ifft, fftshift
import math
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
import dftModel as DF
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
(fs, x2) = UF.wavread('../../../sounds/impulse-response.wav')
x1 = x[40000:44096]
N = 4096
plt.figure(1, figsize=(9.5, 7))
plt.subplot(3,2,1)
plt.title('x1 (ocean.wav)')
plt.plot(x1, 'b')
plt.axis([0,N,min(x1),max(x1)])
plt.subplot(3,2,2)
plt.title('x2 (impulse-response.wav)')
plt.plot(x2, 'b')
plt.axis([0,N,min(x2),max(x2)])
mX1, pX1 = DF.dftAnal(x1, np.ones(N), N)
mX1 = mX1 - max(mX1)
plt.subplot(3,2,3)
plt.title('X1')
plt.plot(mX1, 'r')
plt.axis([0,N/2,-70,0])
mX2, pX2 = DF.dftAnal(x2, np.ones(N), N)
mX2 = mX2 - max(mX2)
plt.subplot(3,2,4)
plt.title('X2')
plt.plot(mX2, 'r')
plt.axis([0,N/2,-70,0])
y = np.convolve(x1, x2)
mY, pY = DF.dftAnal(y[0:N], np.ones(N), N)
mY = mY - max(mY)
plt.subplot(3,2,5)
plt.title('DFT(x1 * x2)')
plt.plot(mY, 'r')
plt.axis([0,N/2,-70,0])
plt.subplot(3,2,6)
plt.title('X1 x X2')
mY1 = 20*np.log10(np.abs(fft(x1) * fft(x2)))
mY1 = mY1 - max(mY1)
plt.plot(mY1[0:N/2], 'r')
plt.axis([0,N/2,-84,0])
plt.tight_layout()
plt.savefig('convolution-1.png')
plt.show()
| agpl-3.0 |
JPFrancoia/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
cookinrelaxin/stupendousML | simple_regression.py | 1 | 1925 | from matplotlib import pyplot as plt
def read_csv(file_name):
with open(file_name, 'r') as f:
data = []
attribute_names = {index:name for index,name in enumerate(f.readline().strip().split(','))}
while True:
line = f.readline()
if not line: return data
else:
data.append({attribute_names[index]:val for index,val in enumerate(line.strip().split(','))})
def simple_regression(input_feature, output):
N = len(output)
input_sum = sum(input_feature)
input_squared_sum = sum([val**2 for val in input_feature])
output_sum = sum(output)
input_output_sum = sum([x*y for x,y in zip(input_feature, output)])
slope = (input_output_sum - ((input_sum * output_sum) / N)) / (input_squared_sum - (input_sum ** 2 / N))
intercept = (output_sum / N) - (slope * (input_sum / N))
return(intercept, slope)
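# The closed-form least-squares estimates computed above are
#   slope     = (sum(x*y) - sum(x)*sum(y)/N) / (sum(x^2) - sum(x)^2/N)
#   intercept = mean(y) - slope * mean(x)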
def get_regression_predictions(input_feature, intercept, slope):
return [intercept + (slope * feature) for feature in input_feature]
def get_residual_sum_of_squares(input_feature, output, intercept, slope):
return sum([(y - (intercept + (x*slope))) ** 2 for x,y in zip(input_feature, output)])
def inverse_regression_predictions(output, intercept, slope):
return (output - intercept) / slope
house_data = read_csv('kc_house_data.csv')
train_data,test_data = (house_data[:int(len(house_data) * .8)],house_data[int(len(house_data) * .8):])
sqft_vals = [float(point['sqft_living']) for point in train_data]
price_vals = [float(point['price']) for point in train_data]
intercept,slope = simple_regression(sqft_vals, price_vals)
print 'attributes: ', [attr for attr in train_data[0].keys()]
print intercept,slope
print get_regression_predictions([2650], intercept, slope)
plt.scatter(sqft_vals, price_vals)
plt.plot([0, 14000], [intercept, intercept + slope * 14000], 'k-')
plt.ylim(0,max(price_vals))
plt.xlim(0,max(sqft_vals))
plt.xlabel('sqft')
plt.ylabel('price USD')
plt.title('sqft vs. price')
plt.show()
| mit |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/path.py | 4 | 38107 | """
A module for dealing with the polylines used throughout matplotlib.
The primary class for polyline handling in matplotlib is :class:`Path`.
Almost all vector drawing makes use of Paths somewhere in the drawing
pipeline.
Whilst a :class:`Path` instance itself cannot be drawn, there exists
:class:`~matplotlib.artist.Artist` subclasses which can be used for
convenient Path visualisation - the two most frequently used of these are
:class:`~matplotlib.patches.PathPatch` and
:class:`~matplotlib.collections.PathCollection`.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
from weakref import WeakValueDictionary
import numpy as np
from numpy import ma
from matplotlib import _path
from matplotlib.cbook import simple_linear_interpolation, maxdict
from matplotlib import rcParams
class Path(object):
"""
:class:`Path` represents a series of possibly disconnected,
possibly closed, line and curve segments.
The underlying storage is made up of two parallel numpy arrays:
- *vertices*: an Nx2 float array of vertices
- *codes*: an N-length uint8 array of vertex types
These two arrays always have the same length in the first
dimension. For example, to represent a cubic curve, you must
    provide three vertices as well as three codes ``CURVE4``.
The code types are:
- ``STOP`` : 1 vertex (ignored)
A marker for the end of the entire path (currently not
required and ignored)
- ``MOVETO`` : 1 vertex
Pick up the pen and move to the given vertex.
- ``LINETO`` : 1 vertex
Draw a line from the current position to the given vertex.
- ``CURVE3`` : 1 control point, 1 endpoint
Draw a quadratic Bezier curve from the current position,
with the given control point, to the given end point.
- ``CURVE4`` : 2 control points, 1 endpoint
Draw a cubic Bezier curve from the current position, with
the given control points, to the given end point.
- ``CLOSEPOLY`` : 1 vertex (ignored)
Draw a line segment to the start point of the current
polyline.
Users of Path objects should not access the vertices and codes
arrays directly. Instead, they should use :meth:`iter_segments`
or :meth:`cleaned` to get the vertex/code pairs. This is important,
since many :class:`Path` objects, as an optimization, do not store a
*codes* at all, but have a default one provided for them by
:meth:`iter_segments`.
.. note::
The vertices and codes arrays should be treated as
immutable -- there are a number of optimizations and assumptions
made up front in the constructor that will not change when the
data changes.
"""
# Path codes
STOP = 0 # 1 vertex
MOVETO = 1 # 1 vertex
LINETO = 2 # 1 vertex
CURVE3 = 3 # 2 vertices
CURVE4 = 4 # 3 vertices
CLOSEPOLY = 79 # 1 vertex
#: A dictionary mapping Path codes to the number of vertices that the
#: code expects.
NUM_VERTICES_FOR_CODE = {STOP: 1,
MOVETO: 1,
LINETO: 1,
CURVE3: 2,
CURVE4: 3,
CLOSEPOLY: 1}
code_type = np.uint8
def __init__(self, vertices, codes=None, _interpolation_steps=1,
closed=False, readonly=False):
"""
Create a new path with the given vertices and codes.
Parameters
----------
vertices : array_like
The ``(n, 2)`` float array, masked array or sequence of pairs
representing the vertices of the path.
If *vertices* contains masked values, they will be converted
to NaNs which are then handled correctly by the Agg
PathIterator and other consumers of path data, such as
:meth:`iter_segments`.
codes : {None, array_like}, optional
n-length array integers representing the codes of the path.
If not None, codes must be the same length as vertices.
If None, *vertices* will be treated as a series of line segments.
_interpolation_steps : int, optional
Used as a hint to certain projections, such as Polar, that this
path should be linearly interpolated immediately before drawing.
This attribute is primarily an implementation detail and is not
intended for public use.
closed : bool, optional
If *codes* is None and closed is True, vertices will be treated as
line segments of a closed polygon.
readonly : bool, optional
Makes the path behave in an immutable way and sets the vertices
and codes as read-only arrays.
"""
if ma.isMaskedArray(vertices):
vertices = vertices.astype(np.float_).filled(np.nan)
else:
vertices = np.asarray(vertices, np.float_)
if (vertices.ndim != 2) or (vertices.shape[1] != 2):
msg = "'vertices' must be a 2D list or array with shape Nx2"
raise ValueError(msg)
if codes is not None:
codes = np.asarray(codes, self.code_type)
if (codes.ndim != 1) or len(codes) != len(vertices):
msg = ("'codes' must be a 1D list or array with the same"
" length of 'vertices'")
raise ValueError(msg)
if len(codes) and codes[0] != self.MOVETO:
msg = ("The first element of 'code' must be equal to 'MOVETO':"
" {0}")
raise ValueError(msg.format(self.MOVETO))
elif closed:
codes = np.empty(len(vertices), dtype=self.code_type)
codes[0] = self.MOVETO
codes[1:-1] = self.LINETO
codes[-1] = self.CLOSEPOLY
self._vertices = vertices
self._codes = codes
self._interpolation_steps = _interpolation_steps
self._update_values()
if readonly:
self._vertices.flags.writeable = False
if self._codes is not None:
self._codes.flags.writeable = False
self._readonly = True
else:
self._readonly = False
@classmethod
def _fast_from_codes_and_verts(cls, verts, codes, internals=None):
"""
Creates a Path instance without the expense of calling the constructor
Parameters
----------
verts : numpy array
codes : numpy array
internals : dict or None
The attributes that the resulting path should have.
Allowed keys are ``readonly``, ``should_simplify``,
``simplify_threshold``, ``has_nonfinite`` and
``interpolation_steps``.
"""
internals = internals or {}
pth = cls.__new__(cls)
if ma.isMaskedArray(verts):
verts = verts.astype(np.float_).filled(np.nan)
else:
verts = np.asarray(verts, np.float_)
pth._vertices = verts
pth._codes = codes
pth._readonly = internals.pop('readonly', False)
pth.should_simplify = internals.pop('should_simplify', True)
pth.simplify_threshold = (
internals.pop('simplify_threshold',
rcParams['path.simplify_threshold'])
)
pth._has_nonfinite = internals.pop('has_nonfinite', False)
pth._interpolation_steps = internals.pop('interpolation_steps', 1)
if internals:
raise ValueError('Unexpected internals provided to '
'_fast_from_codes_and_verts: '
'{0}'.format('\n *'.join(six.iterkeys(
internals
))))
return pth
def _update_values(self):
self._should_simplify = (
rcParams['path.simplify'] and
(len(self._vertices) >= 128 and
(self._codes is None or np.all(self._codes <= Path.LINETO)))
)
self._simplify_threshold = rcParams['path.simplify_threshold']
self._has_nonfinite = not np.isfinite(self._vertices).all()
@property
def vertices(self):
"""
The list of vertices in the `Path` as an Nx2 numpy array.
"""
return self._vertices
@vertices.setter
def vertices(self, vertices):
if self._readonly:
raise AttributeError("Can't set vertices on a readonly Path")
self._vertices = vertices
self._update_values()
@property
def codes(self):
"""
The list of codes in the `Path` as a 1-D numpy array. Each
code is one of `STOP`, `MOVETO`, `LINETO`, `CURVE3`, `CURVE4`
or `CLOSEPOLY`. For codes that correspond to more than one
vertex (`CURVE3` and `CURVE4`), that code will be repeated so
that the length of `self.vertices` and `self.codes` is always
the same.
"""
return self._codes
@codes.setter
def codes(self, codes):
if self._readonly:
raise AttributeError("Can't set codes on a readonly Path")
self._codes = codes
self._update_values()
@property
def simplify_threshold(self):
"""
The fraction of a pixel difference below which vertices will
be simplified out.
"""
return self._simplify_threshold
@simplify_threshold.setter
def simplify_threshold(self, threshold):
self._simplify_threshold = threshold
@property
def has_nonfinite(self):
"""
`True` if the vertices array has nonfinite values.
"""
return self._has_nonfinite
@property
def should_simplify(self):
"""
`True` if the vertices array should be simplified.
"""
return self._should_simplify
@should_simplify.setter
def should_simplify(self, should_simplify):
self._should_simplify = should_simplify
@property
def readonly(self):
"""
`True` if the `Path` is read-only.
"""
return self._readonly
def __copy__(self):
"""
Returns a shallow copy of the `Path`, which will share the
vertices and codes with the source `Path`.
"""
import copy
return copy.copy(self)
copy = __copy__
def __deepcopy__(self, memo=None):
"""
Returns a deepcopy of the `Path`. The `Path` will not be
readonly, even if the source `Path` is.
"""
try:
codes = self.codes.copy()
except AttributeError:
codes = None
return self.__class__(
self.vertices.copy(), codes,
_interpolation_steps=self._interpolation_steps)
deepcopy = __deepcopy__
@classmethod
def make_compound_path_from_polys(cls, XY):
"""
Make a compound path object to draw a number
        of polygons with equal numbers of sides. XY is a (numpolys x
numsides x 2) numpy array of vertices. Return object is a
:class:`Path`
.. plot:: mpl_examples/api/histogram_path_demo.py
"""
# for each poly: 1 for the MOVETO, (numsides-1) for the LINETO, 1 for
# the CLOSEPOLY; the vert for the closepoly is ignored but we still
# need it to keep the codes aligned with the vertices
numpolys, numsides, two = XY.shape
if two != 2:
raise ValueError("The third dimension of 'XY' must be 2")
stride = numsides + 1
nverts = numpolys * stride
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * cls.LINETO
codes[0::stride] = cls.MOVETO
codes[numsides::stride] = cls.CLOSEPOLY
for i in range(numsides):
verts[i::stride] = XY[:, i]
return cls(verts, codes)
@classmethod
def make_compound_path(cls, *args):
"""Make a compound path from a list of Path objects."""
# Handle an empty list in args (i.e. no args).
if not args:
return Path(np.empty([0, 2], dtype=np.float32))
lengths = [len(x) for x in args]
total_length = sum(lengths)
vertices = np.vstack([x.vertices for x in args])
vertices.reshape((total_length, 2))
codes = np.empty(total_length, dtype=cls.code_type)
i = 0
for path in args:
if path.codes is None:
codes[i] = cls.MOVETO
codes[i + 1:i + len(path.vertices)] = cls.LINETO
else:
codes[i:i + len(path.codes)] = path.codes
i += len(path.vertices)
return cls(vertices, codes)
def __repr__(self):
return "Path(%r, %r)" % (self.vertices, self.codes)
def __len__(self):
return len(self.vertices)
def iter_segments(self, transform=None, remove_nans=True, clip=None,
snap=False, stroke_width=1.0, simplify=None,
curves=True, sketch=None):
"""
Iterates over all of the curve segments in the path. Each
iteration returns a 2-tuple (*vertices*, *code*), where
*vertices* is a sequence of 1 - 3 coordinate pairs, and *code* is
one of the :class:`Path` codes.
Additionally, this method can provide a number of standard
cleanups and conversions to the path.
Parameters
----------
transform : None or :class:`~matplotlib.transforms.Transform` instance
If not None, the given affine transformation will
be applied to the path.
remove_nans : {False, True}, optional
If True, will remove all NaNs from the path and
insert MOVETO commands to skip over them.
clip : None or sequence, optional
If not None, must be a four-tuple (x1, y1, x2, y2)
defining a rectangle in which to clip the path.
snap : None or bool, optional
If None, auto-snap to pixels, to reduce
fuzziness of rectilinear lines. If True, force snapping, and
if False, don't snap.
stroke_width : float, optional
The width of the stroke being drawn. Needed
as a hint for the snapping algorithm.
simplify : None or bool, optional
If True, perform simplification, to remove
vertices that do not affect the appearance of the path. If
False, perform no simplification. If None, use the
should_simplify member variable.
curves : {True, False}, optional
If True, curve segments will be returned as curve
segments. If False, all curves will be converted to line
segments.
sketch : None or sequence, optional
If not None, must be a 3-tuple of the form
(scale, length, randomness), representing the sketch
parameters.
"""
if not len(self):
return
cleaned = self.cleaned(transform=transform,
remove_nans=remove_nans, clip=clip,
snap=snap, stroke_width=stroke_width,
simplify=simplify, curves=curves,
sketch=sketch)
vertices = cleaned.vertices
codes = cleaned.codes
len_vertices = vertices.shape[0]
# Cache these object lookups for performance in the loop.
NUM_VERTICES_FOR_CODE = self.NUM_VERTICES_FOR_CODE
STOP = self.STOP
i = 0
while i < len_vertices:
code = codes[i]
if code == STOP:
return
else:
num_vertices = NUM_VERTICES_FOR_CODE[code]
curr_vertices = vertices[i:i+num_vertices].flatten()
yield curr_vertices, code
i += num_vertices
def cleaned(self, transform=None, remove_nans=False, clip=None,
quantize=False, simplify=False, curves=False,
stroke_width=1.0, snap=False, sketch=None):
"""
Cleans up the path according to the parameters returning a new
Path instance.
.. seealso::
See :meth:`iter_segments` for details of the keyword arguments.
Returns
-------
Path instance with cleaned up vertices and codes.
"""
vertices, codes = _path.cleanup_path(self, transform,
remove_nans, clip,
snap, stroke_width,
simplify, curves, sketch)
internals = {'should_simplify': self.should_simplify and not simplify,
'has_nonfinite': self.has_nonfinite and not remove_nans,
'simplify_threshold': self.simplify_threshold,
'interpolation_steps': self._interpolation_steps}
return Path._fast_from_codes_and_verts(vertices, codes, internals)
def transformed(self, transform):
"""
Return a transformed copy of the path.
.. seealso::
:class:`matplotlib.transforms.TransformedPath`
A specialized path class that will cache the
transformed result and automatically update when the
transform changes.
"""
return Path(transform.transform(self.vertices), self.codes,
self._interpolation_steps)
def contains_point(self, point, transform=None, radius=0.0):
"""
Returns whether the (closed) path contains the given point.
If *transform* is not ``None``, the path will be transformed before
performing the test.
*radius* allows the path to be made slightly larger or smaller.
"""
if transform is not None:
transform = transform.frozen()
result = _path.point_in_path(point[0], point[1], radius, self,
transform)
return result
def contains_points(self, points, transform=None, radius=0.0):
"""
Returns a bool array which is ``True`` if the (closed) path contains
the corresponding point.
If *transform* is not ``None``, the path will be transformed before
performing the test.
*radius* allows the path to be made slightly larger or smaller.
"""
if transform is not None:
transform = transform.frozen()
result = _path.points_in_path(points, radius, self, transform)
return result.astype('bool')
def contains_path(self, path, transform=None):
"""
Returns whether this (closed) path completely contains the given path.
If *transform* is not ``None``, the path will be transformed before
performing the test.
"""
if transform is not None:
transform = transform.frozen()
return _path.path_in_path(self, None, path, transform)
def get_extents(self, transform=None):
"""
Returns the extents (*xmin*, *ymin*, *xmax*, *ymax*) of the
path.
Unlike computing the extents on the *vertices* alone, this
algorithm will take into account the curves and deal with
control points appropriately.
"""
from .transforms import Bbox
path = self
if transform is not None:
transform = transform.frozen()
if not transform.is_affine:
path = self.transformed(transform)
transform = None
return Bbox(_path.get_path_extents(path, transform))
def intersects_path(self, other, filled=True):
"""
Returns *True* if this path intersects another given path.
*filled*, when True, treats the paths as if they were filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
return _path.path_intersects_path(self, other, filled)
def intersects_bbox(self, bbox, filled=True):
"""
Returns *True* if this path intersects a given
:class:`~matplotlib.transforms.Bbox`.
*filled*, when True, treats the path as if it was filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
from .transforms import BboxTransformTo
rectangle = self.unit_rectangle().transformed(
BboxTransformTo(bbox))
result = self.intersects_path(rectangle, filled)
return result
def interpolated(self, steps):
"""
Returns a new path resampled to length N x steps. Does not
currently handle interpolating curves.
"""
if steps == 1:
return self
vertices = simple_linear_interpolation(self.vertices, steps)
codes = self.codes
if codes is not None:
new_codes = Path.LINETO * np.ones(((len(codes) - 1) * steps + 1, ))
new_codes[0::steps] = codes
else:
new_codes = None
return Path(vertices, new_codes)
def to_polygons(self, transform=None, width=0, height=0, closed_only=True):
"""
Convert this path to a list of polygons or polylines. Each
polygon/polyline is an Nx2 array of vertices. In other words,
each polygon has no ``MOVETO`` instructions or curves. This
is useful for displaying in backends that do not support
compound paths or Bezier curves, such as GDK.
If *width* and *height* are both non-zero then the lines will
be simplified so that vertices outside of (0, 0), (width,
height) will be clipped.
If *closed_only* is `True` (default), only closed polygons,
with the last point being the same as the first point, will be
returned. Any unclosed polylines in the path will be
explicitly closed. If *closed_only* is `False`, any unclosed
polygons in the path will be returned as unclosed polygons,
and the closed polygons will be returned explicitly closed by
setting the last point to the same as the first point.
"""
if len(self.vertices) == 0:
return []
if transform is not None:
transform = transform.frozen()
if self.codes is None and (width == 0 or height == 0):
vertices = self.vertices
if closed_only:
if len(vertices) < 3:
return []
elif np.any(vertices[0] != vertices[-1]):
vertices = list(vertices) + [vertices[0]]
if transform is None:
return [vertices]
else:
return [transform.transform(vertices)]
# Deal with the case where there are curves and/or multiple
# subpaths (using extension code)
return _path.convert_path_to_polygons(
self, transform, width, height, closed_only)
_unit_rectangle = None
@classmethod
def unit_rectangle(cls):
"""
Return a :class:`Path` instance of the unit rectangle
from (0, 0) to (1, 1).
"""
if cls._unit_rectangle is None:
cls._unit_rectangle = \
cls([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0],
[0.0, 0.0]],
[cls.MOVETO, cls.LINETO, cls.LINETO, cls.LINETO,
cls.CLOSEPOLY],
readonly=True)
return cls._unit_rectangle
_unit_regular_polygons = WeakValueDictionary()
@classmethod
def unit_regular_polygon(cls, numVertices):
"""
Return a :class:`Path` instance for a unit regular
polygon with the given *numVertices* and radius of 1.0,
centered at (0, 0).
"""
if numVertices <= 16:
path = cls._unit_regular_polygons.get(numVertices)
else:
path = None
if path is None:
theta = (2*np.pi/numVertices *
np.arange(numVertices + 1).reshape((numVertices + 1, 1)))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
verts = np.concatenate((np.cos(theta), np.sin(theta)), 1)
codes = np.empty((numVertices + 1,))
codes[0] = cls.MOVETO
codes[1:-1] = cls.LINETO
codes[-1] = cls.CLOSEPOLY
path = cls(verts, codes, readonly=True)
if numVertices <= 16:
cls._unit_regular_polygons[numVertices] = path
return path
_unit_regular_stars = WeakValueDictionary()
@classmethod
def unit_regular_star(cls, numVertices, innerCircle=0.5):
"""
Return a :class:`Path` for a unit regular star
with the given numVertices and radius of 1.0, centered at (0,
0).
"""
if numVertices <= 16:
path = cls._unit_regular_stars.get((numVertices, innerCircle))
else:
path = None
if path is None:
ns2 = numVertices * 2
theta = (2*np.pi/ns2 * np.arange(ns2 + 1))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
r = np.ones(ns2 + 1)
r[1::2] = innerCircle
verts = np.vstack((r*np.cos(theta), r*np.sin(theta))).transpose()
codes = np.empty((ns2 + 1,))
codes[0] = cls.MOVETO
codes[1:-1] = cls.LINETO
codes[-1] = cls.CLOSEPOLY
path = cls(verts, codes, readonly=True)
if numVertices <= 16:
                cls._unit_regular_stars[(numVertices, innerCircle)] = path
return path
@classmethod
def unit_regular_asterisk(cls, numVertices):
"""
Return a :class:`Path` for a unit regular
asterisk with the given numVertices and radius of 1.0,
centered at (0, 0).
"""
return cls.unit_regular_star(numVertices, 0.0)
_unit_circle = None
@classmethod
def unit_circle(cls):
"""
Return the readonly :class:`Path` of the unit circle.
For most cases, :func:`Path.circle` will be what you want.
"""
if cls._unit_circle is None:
cls._unit_circle = cls.circle(center=(0, 0), radius=1,
readonly=True)
return cls._unit_circle
@classmethod
def circle(cls, center=(0., 0.), radius=1., readonly=False):
"""
Return a Path representing a circle of a given radius and center.
Parameters
----------
center : pair of floats
The center of the circle. Default ``(0, 0)``.
radius : float
The radius of the circle. Default is 1.
readonly : bool
Whether the created path should have the "readonly" argument
set when creating the Path instance.
Notes
-----
The circle is approximated using cubic Bezier curves. This
uses 8 splines around the circle using the approach presented
here:
Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
"""
MAGIC = 0.2652031
SQRTHALF = np.sqrt(0.5)
MAGIC45 = np.sqrt((MAGIC*MAGIC) / 2.0)
vertices = np.array([[0.0, -1.0],
[MAGIC, -1.0],
[SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
[SQRTHALF, -SQRTHALF],
[SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
[1.0, -MAGIC],
[1.0, 0.0],
[1.0, MAGIC],
[SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
[SQRTHALF, SQRTHALF],
[SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
[MAGIC, 1.0],
[0.0, 1.0],
[-MAGIC, 1.0],
[-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],
[-SQRTHALF, SQRTHALF],
[-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],
[-1.0, MAGIC],
[-1.0, 0.0],
[-1.0, -MAGIC],
[-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],
[-SQRTHALF, -SQRTHALF],
[-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],
[-MAGIC, -1.0],
[0.0, -1.0],
[0.0, -1.0]],
dtype=np.float_)
codes = [cls.CURVE4] * 26
codes[0] = cls.MOVETO
codes[-1] = cls.CLOSEPOLY
return Path(vertices * radius + center, codes, readonly=readonly)
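    # e.g. Path.circle(center=(0.5, 0.5), radius=0.25) gives a closed circular
    # path built from the 8 cubic Bezier segments described in the docstring.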
_unit_circle_righthalf = None
@classmethod
def unit_circle_righthalf(cls):
"""
Return a :class:`Path` of the right half
of a unit circle. The circle is approximated using cubic Bezier
curves. This uses 4 splines around the circle using the approach
presented here:
Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
"""
if cls._unit_circle_righthalf is None:
MAGIC = 0.2652031
SQRTHALF = np.sqrt(0.5)
MAGIC45 = np.sqrt((MAGIC*MAGIC) / 2.0)
vertices = np.array(
[[0.0, -1.0],
[MAGIC, -1.0],
[SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
[SQRTHALF, -SQRTHALF],
[SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
[1.0, -MAGIC],
[1.0, 0.0],
[1.0, MAGIC],
[SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
[SQRTHALF, SQRTHALF],
[SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
[MAGIC, 1.0],
[0.0, 1.0],
[0.0, -1.0]],
np.float_)
codes = cls.CURVE4 * np.ones(14)
codes[0] = cls.MOVETO
codes[-1] = cls.CLOSEPOLY
cls._unit_circle_righthalf = cls(vertices, codes, readonly=True)
return cls._unit_circle_righthalf
@classmethod
def arc(cls, theta1, theta2, n=None, is_wedge=False):
"""
Return an arc on the unit circle from angle
*theta1* to angle *theta2* (in degrees).
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
        Maisonobe, L. 2003. `Drawing an elliptical arc using
polylines, quadratic or cubic Bezier curves
<http://www.spaceroots.org/documents/ellipse/index.html>`_.
"""
# degrees to radians
theta1 *= np.pi / 180.0
theta2 *= np.pi / 180.0
twopi = np.pi * 2.0
halfpi = np.pi * 0.5
eta1 = np.arctan2(np.sin(theta1), np.cos(theta1))
eta2 = np.arctan2(np.sin(theta2), np.cos(theta2))
eta2 -= twopi * np.floor((eta2 - eta1) / twopi)
# number of curve segments to make
if n is None:
n = int(2 ** np.ceil((eta2 - eta1) / halfpi))
if n < 1:
raise ValueError("n must be >= 1 or None")
deta = (eta2 - eta1) / n
t = np.tan(0.5 * deta)
alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0
steps = np.linspace(eta1, eta2, n + 1, True)
cos_eta = np.cos(steps)
sin_eta = np.sin(steps)
xA = cos_eta[:-1]
yA = sin_eta[:-1]
xA_dot = -yA
yA_dot = xA
xB = cos_eta[1:]
yB = sin_eta[1:]
xB_dot = -yB
yB_dot = xB
if is_wedge:
length = n * 3 + 4
vertices = np.zeros((length, 2), np.float_)
codes = cls.CURVE4 * np.ones((length, ), cls.code_type)
vertices[1] = [xA[0], yA[0]]
codes[0:2] = [cls.MOVETO, cls.LINETO]
codes[-2:] = [cls.LINETO, cls.CLOSEPOLY]
vertex_offset = 2
end = length - 2
else:
length = n * 3 + 1
vertices = np.empty((length, 2), np.float_)
codes = cls.CURVE4 * np.ones((length, ), cls.code_type)
vertices[0] = [xA[0], yA[0]]
codes[0] = cls.MOVETO
vertex_offset = 1
end = length
vertices[vertex_offset:end:3, 0] = xA + alpha * xA_dot
vertices[vertex_offset:end:3, 1] = yA + alpha * yA_dot
vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot
vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot
vertices[vertex_offset+2:end:3, 0] = xB
vertices[vertex_offset+2:end:3, 1] = yB
return cls(vertices, codes, readonly=True)
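    # e.g. Path.arc(0.0, 90.0) yields the quarter arc of the unit circle as
    # cubic Bezier segments; wedge() below additionally closes the path through
    # the center (0, 0).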
@classmethod
def wedge(cls, theta1, theta2, n=None):
"""
Return a wedge of the unit circle from angle
*theta1* to angle *theta2* (in degrees).
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
"""
return cls.arc(theta1, theta2, n, True)
_hatch_dict = maxdict(8)
@classmethod
def hatch(cls, hatchpattern, density=6):
"""
Given a hatch specifier, *hatchpattern*, generates a Path that
can be used in a repeated hatching pattern. *density* is the
number of lines per unit square.
"""
from matplotlib.hatch import get_path
if hatchpattern is None:
return None
hatch_path = cls._hatch_dict.get((hatchpattern, density))
if hatch_path is not None:
return hatch_path
hatch_path = get_path(hatchpattern, density)
cls._hatch_dict[(hatchpattern, density)] = hatch_path
return hatch_path
def clip_to_bbox(self, bbox, inside=True):
"""
Clip the path to the given bounding box.
The path must be made up of one or more closed polygons. This
algorithm will not behave correctly for unclosed paths.
If *inside* is `True`, clip to the inside of the box, otherwise
to the outside of the box.
"""
# Use make_compound_path_from_polys
verts = _path.clip_path_to_rect(self, bbox, inside)
paths = [Path(poly) for poly in verts]
return self.make_compound_path(*paths)
def get_path_collection_extents(
master_transform, paths, transforms, offsets, offset_transform):
"""
Given a sequence of :class:`Path` objects,
:class:`~matplotlib.transforms.Transform` objects and offsets, as
found in a :class:`~matplotlib.collections.PathCollection`,
returns the bounding box that encapsulates all of them.
*master_transform* is a global transformation to apply to all paths
*paths* is a sequence of :class:`Path` instances.
*transforms* is a sequence of
:class:`~matplotlib.transforms.Affine2D` instances.
*offsets* is a sequence of (x, y) offsets (or an Nx2 array)
*offset_transform* is a :class:`~matplotlib.transforms.Affine2D`
to apply to the offsets before applying the offset to the path.
The way that *paths*, *transforms* and *offsets* are combined
follows the same method as for collections. Each is iterated over
independently, so if you have 3 paths, 2 transforms and 1 offset,
their combinations are as follows:
(A, A, A), (B, B, A), (C, A, A)
"""
from .transforms import Bbox
if len(paths) == 0:
raise ValueError("No paths provided")
return Bbox.from_extents(*_path.get_path_collection_extents(
master_transform, paths, np.atleast_3d(transforms),
offsets, offset_transform))
def get_paths_extents(paths, transforms=[]):
"""
Given a sequence of :class:`Path` objects and optional
:class:`~matplotlib.transforms.Transform` objects, returns the
bounding box that encapsulates all of them.
*paths* is a sequence of :class:`Path` instances.
*transforms* is an optional sequence of
:class:`~matplotlib.transforms.Affine2D` instances to apply to
each path.
"""
from .transforms import Bbox, Affine2D
if len(paths) == 0:
raise ValueError("No paths provided")
return Bbox.from_extents(*_path.get_path_collection_extents(
Affine2D(), paths, transforms, [], Affine2D()))
def _define_deprecated_functions(ns):
from .cbook import deprecated
# The C++ functions are not meant to be used directly.
# Users should use the more pythonic wrappers in the Path
# class instead.
for func, alternative in [
('point_in_path', 'path.Path.contains_point'),
('get_path_extents', 'path.Path.get_extents'),
('point_in_path_collection', 'collection.Collection.contains'),
('path_in_path', 'path.Path.contains_path'),
('path_intersects_path', 'path.Path.intersects_path'),
('convert_path_to_polygons', 'path.Path.to_polygons'),
('cleanup_path', 'path.Path.cleaned'),
('points_in_path', 'path.Path.contains_points'),
('clip_path_to_rect', 'path.Path.clip_to_bbox')]:
ns[func] = deprecated(
since='1.3', alternative=alternative)(getattr(_path, func))
_define_deprecated_functions(locals())
| bsd-3-clause |
thdtjsdn/FreeCAD | src/Mod/Plot/Plot.py | 16 | 12328 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD
import PySide
from PySide import QtCore, QtGui
try:
import matplotlib
matplotlib.use('Qt4Agg')
matplotlib.rcParams['backend.qt4']='PySide'
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
except ImportError:
msg = PySide.QtGui.QApplication.translate(
"plot_console",
"matplotlib not found, so Plot module can not be loaded",
None,
PySide.QtGui.QApplication.UnicodeUTF8)
FreeCAD.Console.PrintMessage(msg + '\n')
raise ImportError("matplotlib not installed")
def getMainWindow():
""" Return the FreeCAD main window. """
toplevel = PySide.QtGui.QApplication.topLevelWidgets()
for i in toplevel:
if i.metaObject().className() == "Gui::MainWindow":
return i
return None
def getMdiArea():
""" Return FreeCAD MdiArea. """
mw = getMainWindow()
if not mw:
return None
childs = mw.children()
for c in childs:
if isinstance(c, PySide.QtGui.QMdiArea):
return c
return None
def getPlot():
""" Return the selected Plot document if exist. """
# Get active tab
mdi = getMdiArea()
if not mdi:
return None
sub = mdi.activeSubWindow()
if not sub:
return None
    # Explore children looking for Plot class
for i in sub.children():
if i.metaObject().className() == "Plot":
return i
return None
def closePlot():
""" closePlot(): Close the active plot window. """
# Get active tab
mdi = getMdiArea()
if not mdi:
return None
sub = mdi.activeSubWindow()
if not sub:
return None
    # Explore children looking for Plot class
for i in sub.children():
if i.metaObject().className() == "Plot":
sub.close()
def figure(winTitle="plot"):
"""Create a new plot subwindow/tab.
Keyword arguments:
winTitle -- Plot tab title.
"""
mdi = getMdiArea()
if not mdi:
return None
win = Plot(winTitle)
sub = mdi.addSubWindow(win)
sub.show()
return win
def plot(x, y, name=None):
"""Plots a new serie (as line plot)
Keyword arguments:
x -- X values
y -- Y values
name -- Data serie name (for legend).
"""
# Get active plot, or create another one if don't exist
plt = getPlot()
if not plt:
plt = figure()
# Call to plot
return plt.plot(x, y, name)
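# Illustrative usage from the FreeCAD Python console (the sample data below is
# made up; it assumes this module is importable as 'Plot'):
#   import Plot
#   Plot.figure("example")
#   Plot.plot([0.0, 1.0, 2.0], [0.0, 1.0, 4.0], "y = x^2")
#   Plot.xlabel("x"); Plot.ylabel("y"); Plot.grid(True)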
def series():
"""Return all the lines from a selected plot."""
plt = getPlot()
if not plt:
return []
return plt.series
def removeSerie(index):
"""Remove a data serie from the active plot.
Keyword arguments:
index -- Index of the serie to remove.
"""
# Get active series
plt = getPlot()
if not plt:
return
plots = plt.series
if not plots:
return
# Remove line from plot
axes = plots[index].axes
axes.lines.pop(plots[index].lid)
# Remove serie from list
del plt.series[index]
# Update GUI
plt.update()
def legend(status=True, pos=None, fontsize=None):
"""Show/Hide the legend from the active plot.
Keyword arguments:
status -- True if legend must be shown, False otherwise.
pos -- Legend position.
fontsize -- Font size
"""
plt = getPlot()
if not plt:
return
plt.legend = status
if fontsize:
plt.legSiz = fontsize
# Hide all legends
for axes in plt.axesList:
axes.legend_ = None
# Legend must be activated on last axes
axes = plt.axesList[-1]
if status:
# Setup legend handles and names
lines = series()
handles = []
names = []
for l in lines:
if l.name is not None:
handles.append(l.line)
names.append(l.name)
# Show the legend (at selected position or at best)
if pos:
l = axes.legend(handles, names, bbox_to_anchor=pos)
plt.legPos = pos
else:
l = axes.legend(handles, names, loc='best')
# Update canvas in order to compute legend data
plt.canvas.draw()
# Get resultant position
try:
fax = axes.get_frame().get_extents()
except:
fax = axes.patch.get_extents()
fl = l.get_frame()
plt.legPos = (
(fl._x + fl._width - fax.x0) / fax.width,
(fl._y + fl._height - fax.y0) / fax.height)
# Set fontsize
for t in l.get_texts():
t.set_fontsize(plt.legSiz)
plt.update()
def grid(status=True):
"""Show/Hide the grid from the active plot.
Keyword arguments:
status -- True if grid must be shown, False otherwise.
"""
plt = getPlot()
if not plt:
return
plt.grid = status
axes = plt.axes
axes.grid(status)
plt.update()
def title(string):
"""Setup the plot title.
Keyword arguments:
string -- Plot title.
"""
plt = getPlot()
if not plt:
return
axes = plt.axes
axes.set_title(string)
plt.update()
def xlabel(string):
"""Setup the x label.
Keyword arguments:
string -- Title to set.
"""
plt = getPlot()
if not plt:
return
axes = plt.axes
axes.set_xlabel(string)
plt.update()
def ylabel(string):
"""Setup the y label.
Keyword arguments:
string -- Title to set.
"""
plt = getPlot()
if not plt:
return
axes = plt.axes
axes.set_ylabel(string)
plt.update()
def axesList():
"""Return the plot axes sets list. """
plt = getPlot()
if not plt:
return []
return plt.axesList
def axes():
"""Return the active plot axes."""
plt = getPlot()
if not plt:
return None
return plt.axes
def addNewAxes(rect=None, frameon=True, patchcolor='none'):
"""Add new axes to plot, setting it as the active one.
Keyword arguments:
rect -- Axes area, None to copy from the last axes data.
frameon -- True to show frame, False otherwise.
patchcolor -- Patch color, 'none' for transparent plot.
"""
plt = getPlot()
if not plt:
return None
fig = plt.fig
if rect is None:
rect = plt.axes.get_position()
ax = fig.add_axes(rect, frameon=frameon)
ax.xaxis.set_ticks_position('bottom')
ax.spines['top'].set_color('none')
ax.yaxis.set_ticks_position('left')
ax.spines['right'].set_color('none')
ax.patch.set_facecolor(patchcolor)
plt.axesList.append(ax)
plt.setActiveAxes(-1)
plt.update()
return ax
def save(path, figsize=None, dpi=None):
"""Save plot.
Keyword arguments:
path -- Destination file path.
figsize -- w,h figure size tuple in inches.
dpi -- Dots per inch.
"""
plt = getPlot()
if not plt:
return
# Backup figure options
fig = plt.fig
sizeBack = fig.get_size_inches()
dpiBack = fig.get_dpi()
# Save figure with new options
if figsize:
fig.set_size_inches(figsize[0], figsize[1])
if dpi:
fig.set_dpi(dpi)
plt.canvas.print_figure(path)
# Restore figure options
fig.set_size_inches(sizeBack[0], sizeBack[1])
fig.set_dpi(dpiBack)
plt.update()
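# Illustrative call (the output path and sizes are made up):
#   Plot.save(u"example.png", figsize=(8.0, 6.0), dpi=100)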
class Line():
def __init__(self, axes, x, y, name):
"""Construct a new plot serie.
Keyword arguments:
axes -- Active axes
x -- X values
y -- Y values
name -- Data serie name (for legend).
"""
self.axes = axes
self.x = x
self.y = y
self.name = name
self.lid = len(axes.lines)
self.line, = axes.plot(x, y)
def setp(self, prop, value):
"""Change a line property value.
Keyword arguments:
prop -- Property name.
value -- New property value.
"""
plt.setp(self.line, prop, value)
def getp(self, prop):
"""Get line property value.
Keyword arguments:
prop -- Property name.
"""
return plt.getp(self.line, prop)
class Plot(PySide.QtGui.QWidget):
def __init__(self,
winTitle="plot",
parent=None,
flags=PySide.QtCore.Qt.WindowFlags(0)):
"""Construct a new plot widget.
Keyword arguments:
winTitle -- Tab title.
parent -- Widget parent.
flags -- QWidget flags
"""
PySide.QtGui.QWidget.__init__(self, parent, flags)
self.setWindowTitle(winTitle)
# Create matplotlib canvas
self.fig = Figure()
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
# Get axes
self.axes = self.fig.add_subplot(111)
self.axesList = [self.axes]
self.axes.xaxis.set_ticks_position('bottom')
self.axes.spines['top'].set_color('none')
self.axes.yaxis.set_ticks_position('left')
self.axes.spines['right'].set_color('none')
# Setup layout
vbox = PySide.QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
# Active series
self.series = []
# Indicators
self.skip = False
self.legend = False
self.legPos = (1.0, 1.0)
self.legSiz = 14
self.grid = False
def plot(self, x, y, name=None):
"""Plot a new line and return it.
Keyword arguments:
x -- X values
y -- Y values
name -- Serie name (for legend). """
l = Line(self.axes, x, y, name)
self.series.append(l)
# Update window
self.update()
return l
def update(self):
"""Update the plot, redrawing the canvas."""
if not self.skip:
self.skip = True
if self.legend:
legend(self.legend, self.legPos, self.legSiz)
self.canvas.draw()
self.skip = False
def isGrid(self):
"""Return True if Grid is active, False otherwise."""
return bool(self.grid)
def isLegend(self):
"""Return True if Legend is active, False otherwise."""
return bool(self.legend)
def setActiveAxes(self, index):
"""Change the current active axes.
Keyword arguments:
index -- Index of the new active axes set.
"""
self.axes = self.axesList[index]
self.fig.sca(self.axes)
| lgpl-2.1 |
glouppe/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
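# The reference implementation above follows the Passive-Aggressive updates of
# Crammer et al. (2006): for the plain hinge/epsilon-insensitive losses the step
# is tau = min(C, loss / ||x||^2) (PA-I), while the squared variants use
# tau = loss / (||x||^2 + 1/(2C)) (PA-II); the step is then signed and applied
# to the weights (and to the intercept when fit_intercept is True).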
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
mjfarmer/scada_py | env/lib/python2.7/site-packages/IPython/core/usage.py | 13 | 23364 | # -*- coding: utf-8 -*-
"""Usage information for the main IPython applications.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
# Copyright (C) 2001-2007 Fernando Perez. <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
from IPython.core import release
cl_usage = """\
=========
IPython
=========
Tools for Interactive Computing in Python
=========================================
A Python shell with automatic history (input and output), dynamic object
introspection, easier configuration, command completion, access to the
system shell and more. IPython can also be embedded in running programs.
Usage
ipython [subcommand] [options] [-c cmd | -m mod | file] [--] [arg] ...
If invoked with no options, it executes the file and exits, passing the
remaining arguments to the script, just as if you had specified the same
command with python. You may need to specify `--` before args to be passed
to the script, to prevent IPython from attempting to parse them. If you
specify the option `-i` before the filename, it will enter an interactive
IPython session after running the script, rather than exiting. Files ending
in .py will be treated as normal Python, but files ending in .ipy can
contain special IPython syntax (magic commands, shell expansions, etc.).
Almost all configuration in IPython is available via the command-line. Do
`ipython --help-all` to see all available options. For persistent
configuration, look into your `ipython_config.py` configuration file for
details.
This file is typically installed in the `IPYTHONDIR` directory, and there
is a separate configuration directory for each profile. The default profile
directory will be located in $IPYTHONDIR/profile_default. IPYTHONDIR
defaults to `$HOME/.ipython`. For Windows users, $HOME resolves to
C:\\Users\\YourUserName in most instances.
To initialize a profile with the default configuration file, do::
$> ipython profile create
and start editing `IPYTHONDIR/profile_default/ipython_config.py`
In IPython's documentation, we will refer to this directory as
`IPYTHONDIR`, you can change its default location by creating an
environment variable with this name and setting it to the desired path.
For more information, see the manual available in HTML and PDF in your
installation, or online at http://ipython.org/documentation.html.
"""
interactive_usage = """
IPython -- An enhanced Interactive Python
=========================================
IPython offers a combination of convenient shell features, special commands
and a history mechanism for both input (command history) and output (results
caching, similar to Mathematica). It is intended to be a fully compatible
replacement for the standard Python interpreter, while offering vastly
improved functionality and flexibility.
At your system command line, type 'ipython -h' to see the command line
options available. This document only describes interactive features.
MAIN FEATURES
-------------
* Access to the standard Python help. As of Python 2.1, a help system is
available with access to object docstrings and the Python manuals. Simply
type 'help' (no quotes) to access it.
* Magic commands: type %magic for information on the magic subsystem.
* System command aliases, via the %alias command or the configuration file(s).
* Dynamic object information:
Typing ?word or word? prints detailed information about an object. If
certain strings in the object are too long (docstrings, code, etc.) they get
snipped in the center for brevity.
Typing ??word or word?? gives access to the full information without
snipping long strings. Long strings are sent to the screen through the less
pager if longer than the screen, printed otherwise.
The ?/?? system gives access to the full source code for any object (if
available), shows function prototypes and other useful information.
If you just want to see an object's docstring, type '%pdoc object' (without
quotes, and without % if you have automagic on).
* Completion in the local namespace, by typing TAB at the prompt.
At any time, hitting tab will complete any available python commands or
variable names, and show you a list of the possible completions if there's
no unambiguous one. It will also complete filenames in the current directory.
  This feature requires the readline and rlcompleter modules, so it won't work
if your Python lacks readline support (such as under Windows).
* Search previous command history in two ways (also requires readline):
- Start typing, and then use Ctrl-p (previous,up) and Ctrl-n (next,down) to
search through only the history items that match what you've typed so
far. If you use Ctrl-p/Ctrl-n at a blank prompt, they just behave like
normal arrow keys.
- Hit Ctrl-r: opens a search prompt. Begin typing and the system searches
your history for lines that match what you've typed so far, completing as
much as it can.
- %hist: search history by index (this does *not* require readline).
* Persistent command history across sessions.
* Logging of input with the ability to save and restore a working session.
* System escape with !. Typing !ls will run 'ls' in the current directory.
* The reload command does a 'deep' reload of a module: changes made to the
module since you imported will actually be available without having to exit.
* Verbose and colored exception traceback printouts. See the magic xmode and
xcolor functions for details (just type %magic).
* Input caching system:
IPython offers numbered prompts (In/Out) with input and output caching. All
input is saved and can be retrieved as variables (besides the usual arrow
key recall).
The following GLOBAL variables always exist (so don't overwrite them!):
_i: stores previous input.
_ii: next previous.
_iii: next-next previous.
_ih : a list of all input _ih[n] is the input from line n.
Additionally, global variables named _i<n> are dynamically created (<n>
being the prompt counter), such that _i<n> == _ih[<n>]
For example, what you typed at prompt 14 is available as _i14 and _ih[14].
You can create macros which contain multiple input lines from this history,
for later re-execution, with the %macro function.
The history function %hist allows you to see any part of your input history
by printing a range of the _i variables. Note that inputs which contain
magic functions (%) appear in the history with a prepended comment. This is
because they aren't really valid Python code, so you can't exec them.
* Output caching system:
For output that is returned from actions, a system similar to the input
cache exists but using _ instead of _i. Only actions that produce a result
(NOT assignments, for example) are cached. If you are familiar with
Mathematica, IPython's _ variables behave exactly like Mathematica's %
variables.
The following GLOBAL variables always exist (so don't overwrite them!):
_ (one underscore): previous output.
__ (two underscores): next previous.
___ (three underscores): next-next previous.
Global variables named _<n> are dynamically created (<n> being the prompt
counter), such that the result of output <n> is always available as _<n>.
Finally, a global dictionary named _oh exists with entries for all lines
which generated output.
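  For example::

    In [5]: 10 + 2
    Out[5]: 12

    In [6]: _ + 1    # the result of prompt 5 is also available as _5
    Out[6]: 13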
* Directory history:
Your history of visited directories is kept in the global list _dh, and the
magic %cd command can be used to go to any entry in that list.
* Auto-parentheses and auto-quotes (adapted from Nathan Gray's LazyPython)
1. Auto-parentheses
Callable objects (i.e. functions, methods, etc) can be invoked like
this (notice the commas between the arguments)::
In [1]: callable_ob arg1, arg2, arg3
and the input will be translated to this::
callable_ob(arg1, arg2, arg3)
This feature is off by default (in rare cases it can produce
undesirable side-effects), but you can activate it at the command-line
by starting IPython with `--autocall 1`, set it permanently in your
configuration file, or turn on at runtime with `%autocall 1`.
You can force auto-parentheses by using '/' as the first character
of a line. For example::
In [1]: /globals # becomes 'globals()'
Note that the '/' MUST be the first character on the line! This
won't work::
In [2]: print /globals # syntax error
In most cases the automatic algorithm should work, so you should
rarely need to explicitly invoke /. One notable exception is if you
are trying to call a function with a list of tuples as arguments (the
parenthesis will confuse IPython)::
In [1]: zip (1,2,3),(4,5,6) # won't work
but this will work::
In [2]: /zip (1,2,3),(4,5,6)
------> zip ((1,2,3),(4,5,6))
Out[2]= [(1, 4), (2, 5), (3, 6)]
IPython tells you that it has altered your command line by
displaying the new command line preceded by -->. e.g.::
In [18]: callable list
-------> callable (list)
2. Auto-Quoting
You can force auto-quoting of a function's arguments by using ',' as
the first character of a line. For example::
In [1]: ,my_function /home/me # becomes my_function("/home/me")
If you use ';' instead, the whole argument is quoted as a single
string (while ',' splits on whitespace)::
In [2]: ,my_function a b c # becomes my_function("a","b","c")
In [3]: ;my_function a b c # becomes my_function("a b c")
Note that the ',' MUST be the first character on the line! This
won't work::
In [4]: x = ,my_function /home/me # syntax error
"""
interactive_usage_min = """\
An enhanced console for Python.
Some of its features are:
- Readline support if the readline library is present.
- Tab completion in the local namespace.
- Logging of input, see command-line options.
- System shell escape via ! , eg !ls.
- Magic commands, starting with a % (like %ls, %pwd, %cd, etc.)
- Keeps track of locally defined variables via %who, %whos.
- Show object information with a ? eg ?x or x? (use ?? for more info).
"""
quick_reference = r"""
IPython -- An enhanced Interactive Python - Quick Reference Card
================================================================
obj?, obj?? : Get help, or more help for object (also works as
?obj, ??obj).
?foo.*abc* : List names in 'foo' containing 'abc' in them.
%magic : Information about IPython's 'magic' % functions.
Magic functions are prefixed by % or %%, and typically take their arguments
without parentheses, quotes or even commas for convenience. Line magics take a
single % and cell magics are prefixed with two %%.
Example magic function calls:
%alias d ls -F : 'd' is now an alias for 'ls -F'
alias d ls -F : Works if 'alias' not a python name
alist = %alias : Get list of aliases to 'alist'
cd /usr/share : Obvious. cd -<tab> to choose from visited dirs.
%cd?? : See help AND source for magic %cd
%timeit x=10 : time the 'x=10' statement with high precision.
%%timeit x=2**100
x**100 : time 'x**100' with a setup of 'x=2**100'; setup code is not
counted. This is an example of a cell magic.
System commands:
!cp a.txt b/ : System command escape, calls os.system()
cp a.txt b/ : after %rehashx, most system commands work without !
cp ${f}.txt $bar : Variable expansion in magics and system commands
files = !ls /usr : Capture system command output
files.s, files.l, files.n: "a b c", ['a','b','c'], 'a\nb\nc'
History:
_i, _ii, _iii : Previous, next previous, next next previous input
_i4, _ih[2:5] : Input history line 4, lines 2-4
exec _i81 : Execute input history line #81 again
%rep 81 : Edit input history line #81
_, __, ___ : previous, next previous, next next previous output
_dh : Directory history
_oh : Output history
%hist : Command history of current session.
%hist -g foo : Search command history of (almost) all sessions for 'foo'.
%hist -g : Command history of (almost) all sessions.
%hist 1/2-8 : Command history containing lines 2-8 of session 1.
%hist 1/ ~2/ : Command history of session 1 and 2 sessions before current.
%hist ~8/1-~6/5 : Command history from line 1 of 8 sessions ago to
line 5 of 6 sessions ago.
%edit 0/ : Open editor to execute code with history of current session.
Autocall:
f 1,2 : f(1,2) # Off by default, enable with %autocall magic.
/f 1,2 : f(1,2) (forced autoparen)
,f 1 2 : f("1","2")
;f 1 2 : f("1 2")
Remember: TAB completion works in many contexts, not just file names
or python names.
The following magic functions are currently available:
"""
gui_reference = """\
===============================
The graphical IPython console
===============================
This console is designed to emulate the look, feel and workflow of a terminal
environment, while adding a number of enhancements that are simply not possible
in a real terminal, such as inline syntax highlighting, true multiline editing,
inline graphics and much more.
This quick reference document contains the basic information you'll need to
know to make the most efficient use of it. For the various command line
options available at startup, type ``ipython qtconsole --help`` at the command line.
Multiline editing
=================
The graphical console is capable of true multiline editing, but it also tries
to behave intuitively like a terminal when possible. If you are used to
IPython's old terminal behavior, you should find the transition painless, and
once you learn a few basic keybindings it will be a much more efficient
environment.
For single expressions or indented blocks, the console behaves almost like the
terminal IPython: single expressions are immediately evaluated, and indented
blocks are evaluated once a single blank line is entered::
In [1]: print "Hello IPython!" # Enter was pressed at the end of the line
Hello IPython!
In [2]: for i in range(10):
...: print i,
...:
0 1 2 3 4 5 6 7 8 9
If you want to enter more than one expression in a single input block
(something not possible in the terminal), you can use ``Control-Enter`` at the
end of your first line instead of ``Enter``. At that point the console goes
into 'cell mode' and even if your inputs are not indented, it will continue
accepting arbitrarily many lines until either you enter an extra blank line or
you hit ``Shift-Enter`` (the key binding that forces execution). When a
multiline cell is entered, IPython analyzes it and executes its code producing
an ``Out[n]`` prompt only for the last expression in it, while the rest of the
cell is executed as if it was a script. An example should clarify this::
In [3]: x=1 # Hit C-Enter here
...: y=2 # from now on, regular Enter is sufficient
...: z=3
...: x**2 # This does *not* produce an Out[] value
...: x+y+z # Only the last expression does
...:
Out[3]: 6
The behavior where an extra blank line forces execution is only active if you
are actually typing at the keyboard each line, and is meant to make it mimic
the IPython terminal behavior. If you paste a long chunk of input (for example
a long script copied from an editor or web browser), it can contain arbitrarily
many intermediate blank lines and they won't cause any problems. As always,
you can then make it execute by appending a blank line *at the end* or hitting
``Shift-Enter`` anywhere within the cell.
With the up arrow key, you can retrieve previous blocks of input that contain
multiple lines. You can move inside of a multiline cell like you would in any
text editor. When you want it executed, the simplest thing to do is to hit the
force execution key, ``Shift-Enter`` (though you can also navigate to the end
and append a blank line by using ``Enter`` twice).
If you've edited a multiline cell and accidentally navigate out of it with the
up or down arrow keys, IPython will clear the cell and replace it with the
contents of the one above or below that you navigated to. If this was an
accident and you want to retrieve the cell you were editing, use the Undo
keybinding, ``Control-z``.
Key bindings
============
The IPython console supports most of the basic Emacs line-oriented keybindings,
in addition to some of its own.
The keybinding prefixes mean:
- ``C``: Control
- ``S``: Shift
- ``M``: Meta (typically the Alt key)
The keybindings themselves are:
- ``Enter``: insert new line (may cause execution, see above).
- ``C-Enter``: *force* new line, *never* causes execution.
- ``S-Enter``: *force* execution regardless of where cursor is, no newline added.
- ``Up``: step backwards through the history.
- ``Down``: step forwards through the history.
- ``S-Up``: search backwards through the history (like ``C-r`` in bash).
- ``S-Down``: search forwards through the history.
- ``C-c``: copy highlighted text to clipboard (prompts are automatically stripped).
- ``C-S-c``: copy highlighted text to clipboard (prompts are not stripped).
- ``C-v``: paste text from clipboard.
- ``C-z``: undo (retrieves lost text if you move out of a cell with the arrows).
- ``C-S-z``: redo.
- ``C-o``: move to 'other' area, between pager and terminal.
- ``C-l``: clear terminal.
- ``C-a``: go to beginning of line.
- ``C-e``: go to end of line.
- ``C-u``: kill from cursor to the beginning of the line.
- ``C-k``: kill from cursor to the end of the line.
- ``C-y``: yank (paste)
- ``C-p``: previous line (like up arrow)
- ``C-n``: next line (like down arrow)
- ``C-f``: forward (like right arrow)
- ``C-b``: back (like left arrow)
- ``C-d``: delete next character, or exits if input is empty
- ``M-<``: move to the beginning of the input region.
- ``M->``: move to the end of the input region.
- ``M-d``: delete next word.
- ``M-Backspace``: delete previous word.
- ``C-.``: force a kernel restart (a confirmation dialog appears).
- ``C-+``: increase font size.
- ``C--``: decrease font size.
- ``C-M-Space``: toggle full screen. (Command-Control-Space on Mac OS X)
The IPython pager
=================
IPython will show long blocks of text from many sources using a builtin pager.
You can control where this pager appears with the ``--paging`` command-line
flag:
- ``inside`` [default]: the pager is overlaid on top of the main terminal. You
must quit the pager to get back to the terminal (similar to how a pager such
as ``less`` or ``more`` works).
- ``vsplit``: the console is made double-tall, and the pager appears on the
bottom area when needed. You can view its contents while using the terminal.
- ``hsplit``: the console is made double-wide, and the pager appears on the
right area when needed. You can view its contents while using the terminal.
- ``none``: the console never pages output.
If you use the vertical or horizontal paging modes, you can navigate between
terminal and pager as follows:
- Tab key: goes from pager to terminal (but not the other way around).
- Control-o: goes from one to another always.
- Mouse: click on either.
In all cases, the ``q`` or ``Escape`` keys quit the pager (when used with the
focus on the pager area).
Running subprocesses
====================
The graphical IPython console uses the ``pexpect`` module to run subprocesses
when you type ``!command``. This has a number of advantages (true asynchronous
output from subprocesses as well as very robust termination of rogue
subprocesses with ``Control-C``), as well as some limitations. The main
limitation is that you can *not* interact back with the subprocess, so anything
that invokes a pager or expects you to type input into it will block and hang
(you can kill it with ``Control-C``).
We have provided as magics ``%less`` to page files (aliased to ``%more``),
``%clear`` to clear the terminal, and ``%man`` on Linux/OSX. These cover the
most common commands you'd want to call in your subshell and that would cause
problems if invoked via ``!cmd``, but you need to be aware of this limitation.
Display
=======
The IPython console can now display objects in a variety of formats, including
HTML, PNG and SVG. This is accomplished using the display functions in
``IPython.core.display``::
In [4]: from IPython.core.display import display, display_html
In [5]: from IPython.core.display import display_png, display_svg
Python objects can simply be passed to these functions and the appropriate
representations will be displayed in the console as long as the objects know
how to compute those representations. The easiest way of teaching objects how
to format themselves in various representations is to define special methods
such as: ``_repr_html_``, ``_repr_svg_`` and ``_repr_png_``. IPython's display formatters
can also be given custom formatter functions for various types::
In [6]: ip = get_ipython()
In [7]: html_formatter = ip.display_formatter.formatters['text/html']
In [8]: html_formatter.for_type(Foo, foo_to_html)
For further details, see ``IPython.core.formatters``.
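For example, any object can opt in by defining one of these methods (a minimal
illustrative sketch; the class and attribute names are arbitrary)::
    class Swatch(object):
        def __init__(self, color):
            self.color = color
        def _repr_html_(self):
            return '<b style="color:%s">%s</b>' % (self.color, self.color)
An instance of ``Swatch`` evaluated at the prompt is then rendered as HTML
instead of plain text.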
Inline matplotlib graphics
==========================
The IPython console is capable of displaying matplotlib figures inline, in SVG
or PNG format. If started with ``matplotlib=inline``, then all figures are
rendered inline automatically (PNG by default). If started with ``--matplotlib``
or ``matplotlib=<your backend>``, then a GUI backend will be used, but IPython's
``display()`` and ``getfigs()`` functions can be used to view plots inline::
In [9]: display(*getfigs()) # display all figures inline
In[10]: display(*getfigs(1,2)) # display figures 1 and 2 inline
"""
quick_guide = """\
? -> Introduction and overview of IPython's features.
%quickref -> Quick reference.
help -> Python's own help system.
object? -> Details about 'object', use 'object??' for extra details.
"""
gui_note = """\
%guiref -> A brief reference about the graphical user interface.
"""
default_banner_parts = [
'Python %s\n' % (sys.version.split('\n')[0],),
'Type "copyright", "credits" or "license" for more information.\n\n',
'IPython {version} -- An enhanced Interactive Python.\n'.format(
version=release.version,
),
quick_guide
]
default_gui_banner_parts = default_banner_parts + [gui_note]
default_banner = ''.join(default_banner_parts)
default_gui_banner = ''.join(default_gui_banner_parts)
# page GUI Reference, for use as a magic:
def page_guiref(arg_s=None):
"""Show a basic reference about the GUI Console."""
from IPython.core import page
page.page(gui_reference)
| gpl-3.0 |
wathen/PhD | MHD/FEniCS/MHD/CG/PicardIter_Direct/DecoupleTest/MHD.py | 1 | 10388 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time as t
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
m = 2
IterType = 'MD'
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
nn = 2
mm = 1
MUsave = np.zeros((mm,1))
MUit = np.zeros((m-1,mm))
print MUit[0,0]
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
R = 010.0
for yy in xrange(1,mm+1):
MU =(R*10**(-yy))
print "++++++++",MU
MUsave[yy-1] = MU
for xx in xrange(1,m):
print xx
level[xx-1] = xx+5
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
parameters["form_compiler"]["quadrature_degree"] = -1
mesh = UnitSquareMesh(nn,nn)
print parameters
order = 2
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "CG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity,Pressure,Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(2,2)
# plot(interpolate(u0,Velocity))
p0 = interpolate(p0,Pressure)
p0.vector()[:] -= np.max(p0.vector().array() )/2
# plot(interpolate(p0,Pressure))
bcu = DirichletBC(W.sub(0),u0, boundary)
bcb = DirichletBC(W.sub(2),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, p, b, r) = TrialFunctions(W)
(v, q, c,s ) = TestFunctions(W)
kappa = 1
Mu_m =1e4
# MU = 1.0
print "================================",MU
F_NS = -MU*Laplacian + Advection + gradPres - kappa*NS_Couple
F_M = Mu_m*kappa*CurlCurl + gradR - kappa*M_Couple
params = [kappa,Mu_m,MU]
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,Neumann=Expression(("0","0")),options ="New")
# plot(u_k)
# plot(p_k)
# t.sleep(10)
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
pConst = -assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += pConst
x = Iter.u_prev(u_k,p_k,b_k,r_k)
# plot(b_k)
print params
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W, F_M, F_NS, u_k, b_k, params, IterType)
print params
RHSform = forms.PicardRHS(mesh, W, u_k,p_k,b_k,r_k,params)
bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0")), boundary)
bcb = DirichletBC(W.sub(2),Expression(("0.0","0.0")), boundary)
bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundary)
bcs = [bcu,bcb,bcr]
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-7 # tolerance
iter = 0 # iteration counter
maxiter = 40 # max no of iterations allowed
SolutionTime = 0
outer = 0
parameters['linear_algebra_backend'] = 'uBLAS'
p = forms.Preconditioner(mesh,W, u_k,b_k, params,IterType)
PP,Pb = assemble_system(p, Lns,bcs)
if IterType == "CD":
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
u = b.duplicate()
P = CP.Assemble(PP)
while eps > tol and iter < maxiter:
iter += 1
if IterType == "CD":
bb = assemble((Lmaxwell + Lns) - RHSform)
for bc in bcs:
bc.apply(bb)
A,b = CP.Assemble(AA,bb)
u = b.duplicate()
else:
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
u = b.duplicate()
ksp = PETSc.KSP().create()
pc = ksp.getPC()#.PC().create()
# P = MO.shift(A,0.000001)
ksp.setOperators(A )
del A
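            # The PETSc options below request a direct solve: 'preonly' applies the
            # preconditioner exactly once, and that preconditioner is a full LU
            # factorisation computed by MUMPS with reverse Cuthill-McKee ordering.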
OptDB = PETSc.Options()
OptDB["ksp_type"] = "preonly"
OptDB["pc_type"] = "lu"
OptDB["pc_factor_mat_ordering_type"] = "rcm"
OptDB["pc_factor_mat_solver_package"] = "mumps"
# OptDB["pc_factor_shift_amount"] = 2
ksp.setFromOptions()
tic()
ksp.solve(b, u)
time = toc()
print time
SolutionTime = SolutionTime +time
del ksp, pc
u, p, b, r, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
p.vector()[:] += - assemble(p*dx)/assemble(ones*dx)
u_k.assign(u)
p_k.assign(p)
b_k.assign(b)
r_k.assign(r)
# plot(u_k)
# plot(p_k)
# t.sleep(1)
uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
# u_k,b_k,epsu,epsb=Iter.PicardTolerance(x,u_k,b_k,FSpaces,dim,"inf",iter)
MUit[xx-1,yy-1]= iter
# SolTime[xx-1] = SolutionTime/iter
ue =u0
pe = p0
be = b0
re = r0
ExactSolution = [ue,pe,be,re]
# errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(x,mesh,FSpaces,ExactSolution,order,dim)
# if xx == 1:
# l2uorder[xx-1] = 0
# else:
# l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
# H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
# l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
# l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
# Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
# l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
# H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
import pandas as pd
LatexTitles = ["l","DoF"]
for x in xrange(1,mm+1):
LatexTitles.append("it")
LatexValues = np.concatenate((level,Wdim,MUit), axis=1)
title = np.concatenate((np.array([[0,0]]),MUsave.T),axis=1)
LatexValues = np.vstack((title,LatexValues))
LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
print LatexTable.to_latex()
# LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
# LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
# LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
# pd.set_option('precision',3)
# LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
# print LatexTable.to_latex()
# print "\n\n Magnetic convergence"
# MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
# MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
# MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
# pd.set_option('precision',3)
# MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
# MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
# print MagneticTable.to_latex()
# print "\n\n Lagrange convergence"
# LagrangeTitles = ["l","B DoF","R DoF","R-L2","L2-order","R-H1","H1-order"]
# LagrangeValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2r,l2rorder,errH1r,H1rorder),axis=1)
# LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
# pd.set_option('precision',3)
# LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,"H1-order","%1.2f")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'L2-order',"%1.2f")
# print LagrangeTable.to_latex()
# # # if (ShowResultPlots == 'yes'):
# plot(u_k)
# plot(b_k)
# plot(r_k)
plot(p_k)
# # plot(ba)
plot(interpolate(pe,Pressure))
# # plot(ra)
# plot(interpolate(re,Lagrange))
# interactive()
interactive()
| mit |
anirudhjayaraman/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
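# l1_min_c returns the smallest C for which the L1-penalised logistic model is
# guaranteed to have at least one non-zero coefficient; np.logspace(0, 3) then
# sweeps C over the three decades above that bound.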
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
RPGOne/scikit-learn | benchmarks/bench_plot_neighbors.py | 101 | 6469 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
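    # One parameter (N, D or k) is varied at a time below; the other two are held
    # at the fiducial values above so the three panels are comparable.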
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)  # use the fiducial D (DD was the loop variable of the section above)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
plt.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = plt.subplot(sbplt, yscale='log')
plt.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = plt.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = plt.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
plt.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
plt.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
plt.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
plt.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
plt.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
plt.show()
| bsd-3-clause |
inside-track/pemi | pemi/pipes/csv.py | 1 | 4359 | import os
import re
import pandas as pd
import pemi
from pemi.pipes.patterns import TargetPipe
def default_column_normalizer(name):
name = str(name)
name = re.sub(r'\s+', '_', name).lower()
name = re.sub(r'[^a-z0-9_]', '_', name)
return name
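# StrConverter is handed to pandas.read_csv as its ``converters`` option: it claims
# to contain every column name and maps each one to ``str``, so every field is read
# as a string and type coercion is deferred to the schema (see _parse_one).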
class StrConverter(dict):
def __contains__(self, key):
return True
def __getitem__(self, key):
return str
class LocalCsvFileSourcePipe(pemi.Pipe):
def __init__(self, *, paths, schema=None, csv_opts=None,
filename_field=None, filename_full_path=False, normalize_columns=True, **params):
super().__init__(**params)
self.paths = paths
self.schema = schema
self.filename_field = filename_field
self.filename_full_path = filename_full_path
self.csv_opts = self._build_csv_opts(csv_opts or {})
if callable(normalize_columns):
self.column_normalizer = normalize_columns
elif normalize_columns:
self.column_normalizer = default_column_normalizer
else:
self.column_normalizer = lambda col: col
self.target(
pemi.PdDataSubject,
name='main',
schema=self.schema
)
self.target(
pemi.PdDataSubject,
name='errors',
schema=self.schema
)
def extract(self):
return self.paths
def parse(self, data):
pemi.log.debug('Parsing files at %s', data)
filepaths = data
mapped_dfs = []
error_dfs = []
for filepath in filepaths:
parsed_dfs = self._parse_one(filepath)
mapped_dfs.append(parsed_dfs.mapped)
error_dfs.append(parsed_dfs.errors)
if len(filepaths) > 0:
self.targets['main'].df = pd.concat(mapped_dfs, sort=False)
self.targets['errors'].df = pd.concat(error_dfs, sort=False)
elif self.schema:
self.targets['main'].df = pd.DataFrame(columns=list(self.schema.keys()))
self.targets['errors'].df = pd.DataFrame(columns=list(self.schema.keys()))
else:
self.targets['main'].df = pd.DataFrame()
self.targets['errors'].df = pd.DataFrame()
pemi.log.debug('Parsed %i records', len(self.targets['main'].df))
return self.targets['main'].df
def _build_csv_opts(self, user_csv_opts):
if self.schema:
file_fieldnames = [k for k in self.schema.keys() if k != self.filename_field]
usecols = lambda col: self.column_normalizer(col) in file_fieldnames
else:
usecols = None
mandatory_opts = {
'converters': StrConverter(),
'usecols': usecols
}
default_opts = {
'engine': 'c',
'error_bad_lines': True
}
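        # Later dicts win in the merge below: user options override the defaults,
        # while the mandatory options (all-string converters, column filter) can
        # never be overridden.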
return {**default_opts, **user_csv_opts, **mandatory_opts}
def _parse_one(self, filepath):
pemi.log.debug('Parsing file at %s', filepath)
raw_df = pd.read_csv(filepath, **self.csv_opts)
pemi.log.debug('Found %i raw records', len(raw_df))
raw_df.columns = [self.column_normalizer(col) for col in raw_df.columns]
if self.filename_field:
if self.filename_full_path:
raw_df[self.filename_field] = filepath
else:
raw_df[self.filename_field] = os.path.basename(filepath)
if self.schema:
return raw_df.mapping(
[(name, name, field.coerce) for name, field in self.schema.items()],
on_error='redirect'
)
return raw_df.mapping([], inplace=True)
def flow(self):
self.parse(self.extract())
class LocalCsvFileTargetPipe(TargetPipe):
def __init__(self, *, path, csv_opts=None, **params):
super().__init__(**params)
self.path = path
self.csv_opts = self._build_csv_opts(csv_opts or {})
def encode(self):
return self.sources['main'].df
def load(self, encoded_data):
df = encoded_data
df.to_csv(self.path, **self.csv_opts)
return self.path
@staticmethod
def _build_csv_opts(user_csv_opts):
mandatory_opts = {}
default_opts = {
'index': False
}
return {**default_opts, **user_csv_opts, **mandatory_opts}
| mit |
geopandas/geopandas | geopandas/tests/test_overlay.py | 1 | 20494 | import os
import pandas as pd
from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box
from fiona.errors import DriverError
import geopandas
from geopandas import GeoDataFrame, GeoSeries, overlay, read_file
from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal
import pytest
DATA = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data", "overlay")
pytestmark = pytest.mark.skip_no_sindex
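# Module-level marker, presumably handled in the suite's conftest: skip all of
# these tests when no spatial index backend is available.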
@pytest.fixture
def dfs(request):
s1 = GeoSeries(
[
Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
Polygon([(2, 2), (4, 2), (4, 4), (2, 4)]),
]
)
s2 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df1 = GeoDataFrame({"col1": [1, 2], "geometry": s1})
df2 = GeoDataFrame({"col2": [1, 2], "geometry": s2})
return df1, df2
@pytest.fixture(params=["default-index", "int-index", "string-index"])
def dfs_index(request, dfs):
df1, df2 = dfs
if request.param == "int-index":
df1.index = [1, 2]
df2.index = [0, 2]
if request.param == "string-index":
df1.index = ["row1", "row2"]
return df1, df2
@pytest.fixture(
params=["union", "intersection", "difference", "symmetric_difference", "identity"]
)
def how(request):
return request.param
@pytest.fixture(params=[True, False])
def keep_geom_type(request):
return request.param
def test_overlay(dfs_index, how):
"""
Basic overlay test with small dummy example dataframes (from docs).
Results obtained using QGIS 2.16 (Vector -> Geoprocessing Tools ->
Intersection / Union / ...), saved to GeoJSON
"""
df1, df2 = dfs_index
result = overlay(df1, df2, how=how)
# construction of result
def _read(name):
expected = read_file(
os.path.join(DATA, "polys", "df1_df2-{0}.geojson".format(name))
)
expected.crs = None
return expected
if how == "identity":
expected_intersection = _read("intersection")
expected_difference = _read("difference")
expected = pd.concat(
[expected_intersection, expected_difference], ignore_index=True, sort=False
)
expected["col1"] = expected["col1"].astype(float)
else:
expected = _read(how)
# TODO needed adaptations to result
if how == "union":
result = result.sort_values(["col1", "col2"]).reset_index(drop=True)
elif how == "difference":
result = result.reset_index(drop=True)
assert_geodataframe_equal(result, expected, check_column_type=False)
# for difference also reversed
if how == "difference":
result = overlay(df2, df1, how=how)
result = result.reset_index(drop=True)
expected = _read("difference-inverse")
assert_geodataframe_equal(result, expected, check_column_type=False)
@pytest.mark.filterwarnings("ignore:GeoSeries crs mismatch:UserWarning")
def test_overlay_nybb(how):
polydf = read_file(geopandas.datasets.get_path("nybb"))
# The circles have been constructed and saved at the time the expected
# results were created (exact output of buffer algorithm can slightly
# change over time -> use saved ones)
# # construct circles dataframe
# N = 10
# b = [int(x) for x in polydf.total_bounds]
# polydf2 = GeoDataFrame(
# [
# {"geometry": Point(x, y).buffer(10000), "value1": x + y, "value2": x - y}
# for x, y in zip(
# range(b[0], b[2], int((b[2] - b[0]) / N)),
# range(b[1], b[3], int((b[3] - b[1]) / N)),
# )
# ],
# crs=polydf.crs,
# )
polydf2 = read_file(os.path.join(DATA, "nybb_qgis", "polydf2.shp"))
result = overlay(polydf, polydf2, how=how)
cols = ["BoroCode", "BoroName", "Shape_Leng", "Shape_Area", "value1", "value2"]
if how == "difference":
cols = cols[:-2]
# expected result
if how == "identity":
# read union one, further down below we take the appropriate subset
expected = read_file(os.path.join(DATA, "nybb_qgis", "qgis-union.shp"))
else:
expected = read_file(
os.path.join(DATA, "nybb_qgis", "qgis-{0}.shp".format(how))
)
# The result of QGIS for 'union' contains incorrect geometries:
# 24 is a full original circle overlapping with unioned geometries, and
# 27 is a completely duplicated row)
if how == "union":
expected = expected.drop([24, 27])
expected.reset_index(inplace=True, drop=True)
# Eliminate observations without geometries (issue from QGIS)
expected = expected[expected.is_valid]
expected.reset_index(inplace=True, drop=True)
if how == "identity":
expected = expected[expected.BoroCode.notnull()].copy()
# Order GeoDataFrames
expected = expected.sort_values(cols).reset_index(drop=True)
# TODO needed adaptations to result
result = result.sort_values(cols).reset_index(drop=True)
if how in ("union", "identity"):
# concat < 0.23 sorts, so changes the order of the columns
# but at least we ensure 'geometry' is the last column
assert result.columns[-1] == "geometry"
assert len(result.columns) == len(expected.columns)
result = result.reindex(columns=expected.columns)
# the ordering of the spatial index results causes slight deviations
# in the resultant geometries for multipolygons
# for more details on the discussion, see:
# https://github.com/geopandas/geopandas/pull/1338
# https://github.com/geopandas/geopandas/issues/1337
# Temporary workaround below:
# simplify multipolygon geometry comparison
# since the order of the constituent polygons depends on
# the ordering of spatial indexing results, we cannot
# compare symmetric_difference results directly when the
# resultant geometry is a multipolygon
# first, check that all bounds and areas are approx equal
# this is a very rough check for multipolygon equality
pd.testing.assert_series_equal(
result.geometry.area, expected.geometry.area, check_less_precise=True
)
pd.testing.assert_frame_equal(
result.geometry.bounds, expected.geometry.bounds, check_less_precise=True
)
# There are two cases where the multipolygon have a different number
# of sub-geometries -> not solved by normalize (and thus drop for now)
if how == "symmetric_difference":
expected.loc[9, "geometry"] = None
result.loc[9, "geometry"] = None
if how == "union":
expected.loc[24, "geometry"] = None
result.loc[24, "geometry"] = None
assert_geodataframe_equal(
result,
expected,
normalize=True,
check_crs=False,
check_column_type=False,
check_less_precise=True,
)
def test_overlay_overlap(how):
"""
Overlay test with overlapping geometries in both dataframes.
Test files are created with::
import geopandas
from geopandas import GeoSeries, GeoDataFrame
from shapely.geometry import Point, Polygon, LineString
s1 = GeoSeries([Point(0, 0), Point(1.5, 0)]).buffer(1, resolution=2)
s2 = GeoSeries([Point(1, 1), Point(2, 2)]).buffer(1, resolution=2)
df1 = GeoDataFrame({'geometry': s1, 'col1':[1,2]})
df2 = GeoDataFrame({'geometry': s2, 'col2':[1, 2]})
ax = df1.plot(alpha=0.5)
df2.plot(alpha=0.5, ax=ax, color='C1')
df1.to_file('geopandas/geopandas/tests/data/df1_overlap.geojson',
driver='GeoJSON')
df2.to_file('geopandas/geopandas/tests/data/df2_overlap.geojson',
driver='GeoJSON')
and then overlay results are obtained from using QGIS 2.16
(Vector -> Geoprocessing Tools -> Intersection / Union / ...),
saved to GeoJSON.
"""
df1 = read_file(os.path.join(DATA, "overlap", "df1_overlap.geojson"))
df2 = read_file(os.path.join(DATA, "overlap", "df2_overlap.geojson"))
result = overlay(df1, df2, how=how)
if how == "identity":
raise pytest.skip()
expected = read_file(
os.path.join(DATA, "overlap", "df1_df2_overlap-{0}.geojson".format(how))
)
if how == "union":
# the QGIS result has the last row duplicated, so removing this
expected = expected.iloc[:-1]
# TODO needed adaptations to result
result = result.reset_index(drop=True)
if how == "union":
result = result.sort_values(["col1", "col2"]).reset_index(drop=True)
assert_geodataframe_equal(
result,
expected,
normalize=True,
check_column_type=False,
check_less_precise=True,
)
@pytest.mark.parametrize("other_geometry", [False, True])
def test_geometry_not_named_geometry(dfs, how, other_geometry):
# Issue #306
# Add points and flip names
df1, df2 = dfs
df3 = df1.copy()
df3 = df3.rename(columns={"geometry": "polygons"})
df3 = df3.set_geometry("polygons")
if other_geometry:
df3["geometry"] = df1.centroid.geometry
assert df3.geometry.name == "polygons"
res1 = overlay(df1, df2, how=how)
res2 = overlay(df3, df2, how=how)
assert df3.geometry.name == "polygons"
if how == "difference":
# in case of 'difference', column names of left frame are preserved
assert res2.geometry.name == "polygons"
if other_geometry:
assert "geometry" in res2.columns
assert_geoseries_equal(
res2["geometry"], df3["geometry"], check_series_type=False
)
res2 = res2.drop(["geometry"], axis=1)
res2 = res2.rename(columns={"polygons": "geometry"})
res2 = res2.set_geometry("geometry")
# TODO if existing column is overwritten -> geometry not last column
if other_geometry and how == "intersection":
res2 = res2.reindex(columns=res1.columns)
assert_geodataframe_equal(res1, res2)
df4 = df2.copy()
df4 = df4.rename(columns={"geometry": "geom"})
df4 = df4.set_geometry("geom")
if other_geometry:
df4["geometry"] = df2.centroid.geometry
assert df4.geometry.name == "geom"
res1 = overlay(df1, df2, how=how)
res2 = overlay(df1, df4, how=how)
assert_geodataframe_equal(res1, res2)
def test_bad_how(dfs):
df1, df2 = dfs
with pytest.raises(ValueError):
overlay(df1, df2, how="spandex")
def test_duplicate_column_name(dfs):
df1, df2 = dfs
df2r = df2.rename(columns={"col2": "col1"})
res = overlay(df1, df2r, how="union")
assert ("col1_1" in res.columns) and ("col1_2" in res.columns)
def test_geoseries_warning(dfs):
df1, df2 = dfs
# Issue #305
with pytest.raises(NotImplementedError):
overlay(df1, df2.geometry, how="union")
def test_preserve_crs(dfs, how):
df1, df2 = dfs
result = overlay(df1, df2, how=how)
assert result.crs is None
crs = "epsg:4326"
df1.crs = crs
df2.crs = crs
result = overlay(df1, df2, how=how)
assert result.crs == crs
def test_crs_mismatch(dfs, how):
df1, df2 = dfs
df1.crs = 4326
df2.crs = 3857
with pytest.warns(UserWarning, match="CRS mismatch between the CRS"):
overlay(df1, df2, how=how)
def test_empty_intersection(dfs):
df1, df2 = dfs
polys3 = GeoSeries(
[
Polygon([(-1, -1), (-3, -1), (-3, -3), (-1, -3)]),
Polygon([(-3, -3), (-5, -3), (-5, -5), (-3, -5)]),
]
)
df3 = GeoDataFrame({"geometry": polys3, "col3": [1, 2]})
expected = GeoDataFrame([], columns=["col1", "col3", "geometry"])
result = overlay(df1, df3)
assert_geodataframe_equal(result, expected, check_like=True)
def test_correct_index(dfs):
# GH883 - case where the index was not properly reset
df1, df2 = dfs
polys3 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df3 = GeoDataFrame({"geometry": polys3, "col3": [1, 2, 3]})
i1 = Polygon([(1, 1), (1, 3), (3, 3), (3, 1), (1, 1)])
i2 = Polygon([(3, 3), (3, 5), (5, 5), (5, 3), (3, 3)])
expected = GeoDataFrame(
[[1, 1, i1], [3, 2, i2]], columns=["col3", "col2", "geometry"]
)
result = overlay(df3, df2, keep_geom_type=True)
assert_geodataframe_equal(result, expected)
def test_warn_on_keep_geom_type(dfs):
df1, df2 = dfs
polys3 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df3 = GeoDataFrame({"geometry": polys3})
with pytest.warns(UserWarning, match="`keep_geom_type=True` in overlay"):
overlay(df2, df3, keep_geom_type=None)
@pytest.mark.parametrize(
"geom_types", ["polys", "poly_line", "poly_point", "line_poly", "point_poly"]
)
def test_overlay_strict(how, keep_geom_type, geom_types):
"""
Test of mixed geometry types on input and output. Expected results initially
generated using following snippet.
polys1 = gpd.GeoSeries([Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)])])
df1 = gpd.GeoDataFrame({'col1': [1, 2], 'geometry': polys1})
polys2 = gpd.GeoSeries([Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)])])
df2 = gpd.GeoDataFrame({'geometry': polys2, 'col2': [1, 2, 3]})
lines1 = gpd.GeoSeries([LineString([(2, 0), (2, 4), (6, 4)]),
LineString([(0, 3), (6, 3)])])
df3 = gpd.GeoDataFrame({'col3': [1, 2], 'geometry': lines1})
points1 = gpd.GeoSeries([Point((2, 2)),
Point((3, 3))])
df4 = gpd.GeoDataFrame({'col4': [1, 2], 'geometry': points1})
params=["union", "intersection", "difference", "symmetric_difference",
"identity"]
stricts = [True, False]
for p in params:
for s in stricts:
exp = gpd.overlay(df1, df2, how=p, keep_geom_type=s)
if not exp.empty:
exp.to_file('polys_{p}_{s}.geojson'.format(p=p, s=s),
driver='GeoJSON')
for p in params:
for s in stricts:
exp = gpd.overlay(df1, df3, how=p, keep_geom_type=s)
if not exp.empty:
exp.to_file('poly_line_{p}_{s}.geojson'.format(p=p, s=s),
driver='GeoJSON')
for p in params:
for s in stricts:
exp = gpd.overlay(df1, df4, how=p, keep_geom_type=s)
if not exp.empty:
exp.to_file('poly_point_{p}_{s}.geojson'.format(p=p, s=s),
driver='GeoJSON')
"""
polys1 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df1 = GeoDataFrame({"col1": [1, 2], "geometry": polys1})
polys2 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df2 = GeoDataFrame({"geometry": polys2, "col2": [1, 2, 3]})
lines1 = GeoSeries(
[LineString([(2, 0), (2, 4), (6, 4)]), LineString([(0, 3), (6, 3)])]
)
df3 = GeoDataFrame({"col3": [1, 2], "geometry": lines1})
points1 = GeoSeries([Point((2, 2)), Point((3, 3))])
df4 = GeoDataFrame({"col4": [1, 2], "geometry": points1})
if geom_types == "polys":
result = overlay(df1, df2, how=how, keep_geom_type=keep_geom_type)
elif geom_types == "poly_line":
result = overlay(df1, df3, how=how, keep_geom_type=keep_geom_type)
elif geom_types == "poly_point":
result = overlay(df1, df4, how=how, keep_geom_type=keep_geom_type)
elif geom_types == "line_poly":
result = overlay(df3, df1, how=how, keep_geom_type=keep_geom_type)
elif geom_types == "point_poly":
result = overlay(df4, df1, how=how, keep_geom_type=keep_geom_type)
try:
expected = read_file(
os.path.join(
DATA,
"strict",
"{t}_{h}_{s}.geojson".format(t=geom_types, h=how, s=keep_geom_type),
)
)
# the order depends on the spatial index used
# so we sort the resultant dataframes to get a consistent order
# independently of the spatial index implementation
assert all(expected.columns == result.columns), "Column name mismatch"
cols = list(set(result.columns) - set(["geometry"]))
expected = expected.sort_values(cols, axis=0).reset_index(drop=True)
result = result.sort_values(cols, axis=0).reset_index(drop=True)
assert_geodataframe_equal(
result,
expected,
normalize=True,
check_column_type=False,
check_less_precise=True,
check_crs=False,
check_dtype=False,
)
except DriverError: # fiona >= 1.8
assert result.empty
except OSError: # fiona < 1.8
assert result.empty
def test_mixed_geom_error():
polys1 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df1 = GeoDataFrame({"col1": [1, 2], "geometry": polys1})
mixed = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
LineString([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
dfmixed = GeoDataFrame({"col1": [1, 2], "geometry": mixed})
with pytest.raises(NotImplementedError):
overlay(df1, dfmixed, keep_geom_type=True)
def test_keep_geom_type_error():
gcol = GeoSeries(
GeometryCollection(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
LineString([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
)
dfcol = GeoDataFrame({"col1": [2], "geometry": gcol})
polys1 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df1 = GeoDataFrame({"col1": [1, 2], "geometry": polys1})
with pytest.raises(TypeError):
overlay(dfcol, df1, keep_geom_type=True)
def test_keep_geom_type_geometry_collection():
# GH 1581
df1 = read_file(os.path.join(DATA, "geom_type", "df1.geojson"))
df2 = read_file(os.path.join(DATA, "geom_type", "df2.geojson"))
intersection = overlay(df1, df2, keep_geom_type=True)
assert len(intersection) == 1
assert (intersection.geom_type == "Polygon").all()
intersection = overlay(df1, df2, keep_geom_type=False)
assert len(intersection) == 1
assert (intersection.geom_type == "GeometryCollection").all()
@pytest.mark.parametrize("make_valid", [True, False])
def test_overlap_make_valid(make_valid):
bowtie = Polygon([(1, 1), (9, 9), (9, 1), (1, 9), (1, 1)])
assert not bowtie.is_valid
fixed_bowtie = bowtie.buffer(0)
assert fixed_bowtie.is_valid
df1 = GeoDataFrame({"col1": ["region"], "geometry": GeoSeries([box(0, 0, 10, 10)])})
df_bowtie = GeoDataFrame(
{"col1": ["invalid", "valid"], "geometry": GeoSeries([bowtie, fixed_bowtie])}
)
if make_valid:
df_overlay_bowtie = overlay(df1, df_bowtie, make_valid=make_valid)
assert df_overlay_bowtie.at[0, "geometry"].equals(fixed_bowtie)
assert df_overlay_bowtie.at[1, "geometry"].equals(fixed_bowtie)
else:
with pytest.raises(ValueError, match="1 invalid input geometries"):
overlay(df1, df_bowtie, make_valid=make_valid)
def test_empty_overlay_return_non_duplicated_columns():
nybb = geopandas.read_file(geopandas.datasets.get_path("nybb"))
nybb2 = nybb.copy()
nybb2.geometry = nybb2.translate(20000000)
result = geopandas.overlay(nybb, nybb2)
assert all(result.columns.isin(nybb.columns))
assert len(result.columns) == len(nybb.columns)
| bsd-3-clause |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/core/indexes/interval.py | 6 | 34971 | """ define the IntervalIndex """
import numpy as np
from pandas.core.dtypes.missing import notnull, isnull
from pandas.core.dtypes.generic import ABCPeriodIndex
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_list_like,
is_datetime_or_timedelta_dtype,
is_integer_dtype,
is_object_dtype,
is_categorical_dtype,
is_float_dtype,
is_interval_dtype,
is_scalar,
is_integer)
from pandas.core.indexes.base import (
Index, _ensure_index,
default_pprint, _index_shared_docs)
from pandas._libs import Timestamp, Timedelta
from pandas._libs.interval import (
Interval, IntervalMixin, IntervalTree,
intervals_to_interval_bounds)
from pandas.core.indexes.multi import MultiIndex
from pandas.compat.numpy import function as nv
from pandas.core import common as com
from pandas.util._decorators import cache_readonly, Appender
from pandas.core.config import get_option
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='IntervalIndex',
target_klass='IntervalIndex or list of Intervals'))
_VALID_CLOSED = set(['left', 'right', 'both', 'neither'])
def _get_next_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype):
return label + np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label + 1
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
raise TypeError('cannot determine next label for type %r'
% type(label))
def _get_prev_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype):
return label - np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label - 1
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
        raise TypeError('cannot determine prev label for type %r'
                        % type(label))
def _get_interval_closed_bounds(interval):
"""
Given an Interval or IntervalIndex, return the corresponding interval with
closed bounds.
"""
left, right = interval.left, interval.right
if interval.open_left:
left = _get_next_label(left)
if interval.open_right:
right = _get_prev_label(right)
return left, right
def _new_IntervalIndex(cls, d):
""" This is called upon unpickling,
rather than the default which doesn't
have arguments and breaks __new__ """
return cls.from_arrays(**d)
class IntervalIndex(IntervalMixin, Index):
"""
Immutable Index implementing an ordered, sliceable set. IntervalIndex
represents an Index of intervals that are all closed on the same side.
.. versionadded:: 0.20.0
Warning: the indexing behaviors are provisional and may change in
a future version of pandas.
Attributes
----------
left, right : array-like (1-dimensional)
Left and right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, optional
Whether the intervals are closed on the left-side, right-side, both or
neither. Defaults to 'right'.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
Copy the meta-data
See Also
--------
Index
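    Examples
    --------
    A new ``IntervalIndex`` is typically constructed using one of its
    ``from_*`` classmethods, for example from an array of breaks:
    >>> IntervalIndex.from_breaks([0, 1, 2, 3])
    IntervalIndex(left=[0, 1, 2],
                  right=[1, 2, 3],
                  closed='right')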
"""
_typ = 'intervalindex'
_comparables = ['name']
_attributes = ['name', 'closed']
_allow_index_ops = True
# we would like our indexing holder to defer to us
_defer_to_indexing = True
_mask = None
def __new__(cls, data, closed='right',
name=None, copy=False, dtype=None,
fastpath=False, verify_integrity=True):
if fastpath:
return cls._simple_new(data.left, data.right, closed, name,
copy=copy, verify_integrity=False)
if name is None and hasattr(data, 'name'):
name = data.name
if isinstance(data, IntervalIndex):
left = data.left
right = data.right
else:
# don't allow scalars
if is_scalar(data):
cls._scalar_data_error(data)
data = IntervalIndex.from_intervals(data, name=name)
left, right = data.left, data.right
return cls._simple_new(left, right, closed, name,
copy=copy, verify_integrity=verify_integrity)
@classmethod
def _simple_new(cls, left, right, closed=None, name=None,
copy=False, verify_integrity=True):
result = IntervalMixin.__new__(cls)
if closed is None:
closed = 'right'
left = _ensure_index(left, copy=copy)
right = _ensure_index(right, copy=copy)
# coerce dtypes to match if needed
if is_float_dtype(left) and is_integer_dtype(right):
right = right.astype(left.dtype)
if is_float_dtype(right) and is_integer_dtype(left):
left = left.astype(right.dtype)
if type(left) != type(right):
raise ValueError("must not have differing left [{}] "
"and right [{}] types".format(
type(left), type(right)))
if isinstance(left, ABCPeriodIndex):
raise ValueError("Period dtypes are not supported, "
"use a PeriodIndex instead")
result._left = left
result._right = right
result._closed = closed
result.name = name
if verify_integrity:
result._validate()
result._reset_identity()
return result
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, left=None, right=None, **kwargs):
if left is None:
# no values passed
left, right = self.left, self.right
elif right is None:
# only single value passed, could be an IntervalIndex
# or array of Intervals
if not isinstance(left, IntervalIndex):
left = type(self).from_intervals(left)
left, right = left.left, left.right
else:
# both left and right are values
pass
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['verify_integrity'] = False
return self._simple_new(left, right, **attributes)
def _validate(self):
"""
Verify that the IntervalIndex is valid.
"""
if self.closed not in _VALID_CLOSED:
raise ValueError("invalid options for 'closed': %s" % self.closed)
if len(self.left) != len(self.right):
raise ValueError('left and right must have the same length')
left_mask = notnull(self.left)
right_mask = notnull(self.right)
if not (left_mask == right_mask).all():
raise ValueError('missing values must be missing in the same '
'location both left and right sides')
if not (self.left[left_mask] <= self.right[left_mask]).all():
raise ValueError('left side of interval must be <= right side')
self._mask = ~left_mask
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
return self._isnan.any()
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
if self._mask is None:
self._mask = isnull(self.left)
return self._mask
@cache_readonly
def _engine(self):
return IntervalTree(self.left, self.right, closed=self.closed)
@property
def _constructor(self):
return type(self).from_intervals
def __contains__(self, key):
"""
return a boolean if this key is IN the index
We *only* accept an Interval
Parameters
----------
key : Interval
Returns
-------
boolean
"""
if not isinstance(key, Interval):
return False
try:
self.get_loc(key)
return True
except KeyError:
return False
def contains(self, key):
"""
return a boolean if this key is IN the index
        Unlike ``__contains__``, scalar keys are accepted in addition
        to Interval objects.
Parameters
----------
key : int, float, Interval
Returns
-------
boolean
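        Examples
        --------
        A minimal sketch; the interval bounds below are chosen purely
        for illustration:
        >>> idx = IntervalIndex.from_arrays([0, 1], [1, 2])
        >>> idx.contains(0.5)
        True
        >>> idx.contains(5)
        False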
"""
try:
self.get_loc(key)
return True
except KeyError:
return False
@classmethod
def from_breaks(cls, breaks, closed='right', name=None, copy=False):
"""
Construct an IntervalIndex from an array of splits
Parameters
----------
breaks : array-like (1-dimensional)
Left and right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, optional
Whether the intervals are closed on the left-side, right-side, both
or neither. Defaults to 'right'.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
copy the data
Examples
--------
>>> IntervalIndex.from_breaks([0, 1, 2, 3])
IntervalIndex(left=[0, 1, 2],
right=[1, 2, 3],
closed='right')
"""
breaks = np.asarray(breaks)
return cls.from_arrays(breaks[:-1], breaks[1:], closed,
name=name, copy=copy)
@classmethod
def from_arrays(cls, left, right, closed='right', name=None, copy=False):
"""
        Construct an IntervalIndex from a left and a right array
Parameters
----------
left : array-like (1-dimensional)
Left bounds for each interval.
right : array-like (1-dimensional)
Right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, optional
Whether the intervals are closed on the left-side, right-side, both
or neither. Defaults to 'right'.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
copy the data
Examples
--------
>>> IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
IntervalIndex(left=[0, 1, 2],
right=[1, 2, 3],
closed='right')
"""
left = np.asarray(left)
right = np.asarray(right)
return cls._simple_new(left, right, closed, name=name,
copy=copy, verify_integrity=True)
@classmethod
def from_intervals(cls, data, name=None, copy=False):
"""
Construct an IntervalIndex from a 1d array of Interval objects
Parameters
----------
data : array-like (1-dimensional)
Array of Interval objects. All intervals must be closed on the same
sides.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
            by default, copy the data; this is compat only and is ignored
Examples
--------
>>> IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
IntervalIndex(left=[0, 1],
right=[1, 2],
closed='right')
        The generic Index constructor works identically when it infers an array
of all intervals:
>>> Index([Interval(0, 1), Interval(1, 2)])
IntervalIndex(left=[0, 1],
right=[1, 2],
closed='right')
"""
data = np.asarray(data)
left, right, closed = intervals_to_interval_bounds(data)
return cls.from_arrays(left, right, closed, name=name, copy=False)
@classmethod
def from_tuples(cls, data, closed='right', name=None, copy=False):
"""
Construct an IntervalIndex from a list/array of tuples
Parameters
----------
data : array-like (1-dimensional)
Array of tuples
closed : {'left', 'right', 'both', 'neither'}, optional
Whether the intervals are closed on the left-side, right-side, both
or neither. Defaults to 'right'.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
            by default, copy the data; this is compat only and is ignored
Examples
--------
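        A minimal sketch; the tuples are chosen for illustration and the
        repr is abbreviated as in the other constructor doctests:
        >>> IntervalIndex.from_tuples([(0, 1), (1, 2)])
        IntervalIndex(left=[0, 1],
                      right=[1, 2],
                      closed='right')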
"""
left = []
right = []
for d in data:
if isnull(d):
left.append(np.nan)
right.append(np.nan)
continue
l, r = d
left.append(l)
right.append(r)
# TODO
        # if we have nulls and we previously had *only*
# integer data, then we have changed the dtype
return cls.from_arrays(left, right, closed, name=name, copy=False)
def to_tuples(self):
return Index(com._asarray_tuplesafe(zip(self.left, self.right)))
@cache_readonly
def _multiindex(self):
return MultiIndex.from_arrays([self.left, self.right],
names=['left', 'right'])
@property
def left(self):
return self._left
@property
def right(self):
return self._right
@property
def closed(self):
return self._closed
def __len__(self):
return len(self.left)
@cache_readonly
def values(self):
"""
Returns the IntervalIndex's data as a numpy array of Interval
objects (with dtype='object')
"""
left = self.left
right = self.right
mask = self._isnan
closed = self._closed
result = np.empty(len(left), dtype=object)
for i in range(len(left)):
if mask[i]:
result[i] = np.nan
else:
result[i] = Interval(left[i], right[i], closed)
return result
def __array__(self, result=None):
""" the array interface, return my values """
return self.values
def __array_wrap__(self, result, context=None):
# we don't want the superclass implementation
return result
def _array_values(self):
return self.values
def __reduce__(self):
d = dict(left=self.left,
right=self.right)
d.update(self._get_attributes_dict())
return _new_IntervalIndex, (self.__class__, d), None
@Appender(_index_shared_docs['copy'])
def copy(self, deep=False, name=None):
left = self.left.copy(deep=True) if deep else self.left
right = self.right.copy(deep=True) if deep else self.right
name = name if name is not None else self.name
        return type(self).from_arrays(left, right, closed=self.closed,
                                      name=name)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
if copy:
self = self.copy()
return self
elif is_object_dtype(dtype):
return Index(self.values, dtype=object)
elif is_categorical_dtype(dtype):
from pandas import Categorical
return Categorical(self, ordered=True)
raise ValueError('Cannot cast IntervalIndex to dtype %s' % dtype)
@cache_readonly
def dtype(self):
return IntervalDtype.construct_from_string(str(self.left.dtype))
@property
def inferred_type(self):
return 'interval'
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
        # we don't use an explicit engine
# so return the bytes here
return (self.left.memory_usage(deep=deep) +
self.right.memory_usage(deep=deep))
@cache_readonly
def mid(self):
"""Returns the mid-point of each interval in the index as an array
"""
try:
return Index(0.5 * (self.left.values + self.right.values))
except TypeError:
# datetime safe version
delta = self.right.values - self.left.values
return Index(self.left.values + 0.5 * delta)
@cache_readonly
def is_monotonic(self):
return self._multiindex.is_monotonic
@cache_readonly
def is_monotonic_increasing(self):
return self._multiindex.is_monotonic_increasing
@cache_readonly
def is_monotonic_decreasing(self):
return self._multiindex.is_monotonic_decreasing
@cache_readonly
def is_unique(self):
return self._multiindex.is_unique
@cache_readonly
def is_non_overlapping_monotonic(self):
# must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )
# or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)
# we already require left <= right
return ((self.right[:-1] <= self.left[1:]).all() or
(self.left[:-1] >= self.right[1:]).all())
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if kind == 'iloc':
return super(IntervalIndex, self)._convert_scalar_indexer(
key, kind=kind)
return key
def _maybe_cast_slice_bound(self, label, side, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
"""
we are passed a list-like indexer. Return the
indexer for matching intervals.
"""
locs = self.get_indexer_for(keyarr)
# we have missing values
if (locs == -1).any():
raise KeyError
return locs
def _maybe_cast_indexed(self, key):
"""
        we need to cast the key, which could be a scalar
        or an array-like, to the type of our subtype
"""
if isinstance(key, IntervalIndex):
return key
subtype = self.dtype.subtype
if is_float_dtype(subtype):
if is_integer(key):
key = float(key)
elif isinstance(key, (np.ndarray, Index)):
key = key.astype('float64')
elif is_integer_dtype(subtype):
if is_integer(key):
key = int(key)
return key
def _check_method(self, method):
if method is None:
return
if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']:
raise NotImplementedError(
'method {} not yet implemented for '
'IntervalIndex'.format(method))
raise ValueError("Invalid fill method")
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError('can only get slices from an IntervalIndex if '
'bounds are non-overlapping and all monotonic '
'increasing or decreasing')
if isinstance(label, IntervalMixin):
raise NotImplementedError
if ((side == 'left' and self.left.is_monotonic_increasing) or
(side == 'right' and self.left.is_monotonic_decreasing)):
sub_idx = self.right
if self.open_right or exclude_label:
label = _get_next_label(label)
else:
sub_idx = self.left
if self.open_left or exclude_label:
label = _get_prev_label(label)
return sub_idx._searchsorted_monotonic(label, side)
def _get_loc_only_exact_matches(self, key):
if isinstance(key, Interval):
if not self.is_unique:
raise ValueError("cannot index with a slice Interval"
" and a non-unique index")
# TODO: this expands to a tuple index, see if we can
# do better
return Index(self._multiindex.values).get_loc(key)
raise KeyError
def _find_non_overlapping_monotonic_bounds(self, key):
if isinstance(key, IntervalMixin):
start = self._searchsorted_monotonic(
key.left, 'left', exclude_label=key.open_left)
stop = self._searchsorted_monotonic(
key.right, 'right', exclude_label=key.open_right)
elif isinstance(key, slice):
# slice
start, stop = key.start, key.stop
if (key.step or 1) != 1:
raise NotImplementedError("cannot slice with a slice step")
if start is None:
start = 0
else:
start = self._searchsorted_monotonic(start, 'left')
if stop is None:
stop = len(self)
else:
stop = self._searchsorted_monotonic(stop, 'right')
else:
# scalar or index-like
start = self._searchsorted_monotonic(key, 'left')
stop = self._searchsorted_monotonic(key, 'right')
return start, stop
def get_loc(self, key, method=None):
self._check_method(method)
original_key = key
key = self._maybe_cast_indexed(key)
if self.is_non_overlapping_monotonic:
if isinstance(key, Interval):
left = self._maybe_cast_slice_bound(key.left, 'left', None)
right = self._maybe_cast_slice_bound(key.right, 'right', None)
key = Interval(left, right, key.closed)
else:
key = self._maybe_cast_slice_bound(key, 'left', None)
start, stop = self._find_non_overlapping_monotonic_bounds(key)
if start is None or stop is None:
return slice(start, stop)
elif start + 1 == stop:
return start
elif start < stop:
return slice(start, stop)
else:
raise KeyError(original_key)
else:
# use the interval tree
if isinstance(key, Interval):
left, right = _get_interval_closed_bounds(key)
return self._engine.get_loc_interval(left, right)
else:
return self._engine.get_loc(key)
def get_value(self, series, key):
if com.is_bool_indexer(key):
loc = key
elif is_list_like(key):
loc = self.get_indexer(key)
elif isinstance(key, slice):
if not (key.step is None or key.step == 1):
raise ValueError("cannot support not-default "
"step in a slice")
try:
loc = self.get_loc(key)
except TypeError:
# we didn't find exact intervals
# or are non-unique
raise ValueError("unable to slice with "
"this key: {}".format(key))
else:
loc = self.get_loc(key)
return series.iloc[loc]
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
self._check_method(method)
target = _ensure_index(target)
target = self._maybe_cast_indexed(target)
if self.equals(target):
return np.arange(len(self), dtype='intp')
if self.is_non_overlapping_monotonic:
start, stop = self._find_non_overlapping_monotonic_bounds(target)
start_plus_one = start + 1
if not ((start_plus_one < stop).any()):
return np.where(start_plus_one == stop, start, -1)
if not self.is_unique:
raise ValueError("cannot handle non-unique indices")
# IntervalIndex
if isinstance(target, IntervalIndex):
indexer = self._get_reindexer(target)
# non IntervalIndex
else:
indexer = np.concatenate([self.get_loc(i) for i in target])
return _ensure_platform_int(indexer)
def _get_reindexer(self, target):
"""
Return an indexer for a target IntervalIndex with self
"""
# find the left and right indexers
lindexer = self._engine.get_indexer(target.left.values)
rindexer = self._engine.get_indexer(target.right.values)
# we want to return an indexer on the intervals
        # however, our keys could overlap multiple intervals,
        # so we iterate through the indexers and construct
        # a set of indexers
indexer = []
n = len(self)
for i, (l, r) in enumerate(zip(lindexer, rindexer)):
target_value = target[i]
# matching on the lhs bound
if (l != -1 and
self.closed == 'right' and
target_value.left == self[l].right):
l += 1
            # matching on the rhs bound
if (r != -1 and
self.closed == 'left' and
target_value.right == self[r].left):
r -= 1
# not found
if l == -1 and r == -1:
indexer.append(np.array([-1]))
elif r == -1:
indexer.append(np.arange(l, n))
elif l == -1:
# care about left/right closed here
value = self[i]
# target.closed same as self.closed
if self.closed == target.closed:
if target_value.left < value.left:
indexer.append(np.array([-1]))
continue
# target.closed == 'left'
elif self.closed == 'right':
if target_value.left <= value.left:
indexer.append(np.array([-1]))
continue
# target.closed == 'right'
elif self.closed == 'left':
if target_value.left <= value.left:
indexer.append(np.array([-1]))
continue
indexer.append(np.arange(0, r + 1))
else:
indexer.append(np.arange(l, r + 1))
return np.concatenate(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = self._maybe_cast_indexed(_ensure_index(target))
return super(IntervalIndex, self).get_indexer_non_unique(target)
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
return self._shallow_copy(values)
def delete(self, loc):
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
return self._shallow_copy(new_left, new_right)
def insert(self, loc, item):
if not isinstance(item, Interval):
raise ValueError('can only insert Interval objects into an '
'IntervalIndex')
if not item.closed == self.closed:
raise ValueError('inserted item must be closed on the same side '
'as the index')
new_left = self.left.insert(loc, item.left)
new_right = self.right.insert(loc, item.right)
return self._shallow_copy(new_left, new_right)
def _as_like_interval_index(self, other, error_msg):
self._assert_can_do_setop(other)
other = _ensure_index(other)
if (not isinstance(other, IntervalIndex) or
self.closed != other.closed):
raise ValueError(error_msg)
return other
def _append_same_dtype(self, to_concat, name):
"""
assert that we all have the same .closed
we allow a 0-len index here as well
"""
if not len(set([i.closed for i in to_concat if len(i)])) == 1:
msg = ('can only append two IntervalIndex objects '
'that are closed on the same side')
raise ValueError(msg)
return super(IntervalIndex, self)._append_same_dtype(to_concat, name)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
left, right = self.left, self.right
if fill_value is None:
fill_value = self._na_value
mask = indices == -1
if not mask.any():
# we won't change dtype here in this case
            # if we don't need to
allow_fill = False
taker = lambda x: x.take(indices, allow_fill=allow_fill,
fill_value=fill_value)
try:
new_left = taker(left)
new_right = taker(right)
except ValueError:
            # we need to coerce; might have NA's in an
            # integer dtype
new_left = taker(left.astype(float))
new_right = taker(right.astype(float))
return self._shallow_copy(new_left, new_right)
def __getitem__(self, value):
mask = self._isnan[value]
if is_scalar(mask) and mask:
return self._na_value
left = self.left[value]
right = self.right[value]
# scalar
if not isinstance(left, Index):
return Interval(left, right, self.closed)
return self._shallow_copy(left, right)
# __repr__ associated methods are based on MultiIndex
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
from pandas.io.formats.format import IntervalArrayFormatter
return IntervalArrayFormatter(values=self,
na_rep=na_rep,
justify='all').get_result()
def _format_data(self):
# TODO: integrate with categorical and make generic
n = len(self)
max_seq_items = min((get_option(
'display.max_seq_items') or n) // 10, 10)
formatter = str
if n == 0:
summary = '[]'
elif n == 1:
first = formatter(self[0])
summary = '[{}]'.format(first)
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = '[{}, {}]'.format(first, last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
summary = '[{} ... {}]'.format(', '.join(head),
', '.join(tail))
else:
head = []
tail = [formatter(x) for x in self]
summary = '[{}]'.format(', '.join(tail))
return summary + self._format_space()
def _format_attrs(self):
attrs = [('closed', repr(self.closed))]
if self.name is not None:
attrs.append(('name', default_pprint(self.name)))
attrs.append(('dtype', "'%s'" % self.dtype))
return attrs
def _format_space(self):
return "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
def argsort(self, *args, **kwargs):
return np.lexsort((self.right, self.left))
def equals(self, other):
if self.is_(other):
return True
# if we can coerce to an II
# then we can compare
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
            other = Index(getattr(other, 'values', other))
return (self.left.equals(other.left) and
self.right.equals(other.right) and
self.closed == other.closed)
def _setop(op_name):
def func(self, other):
msg = ('can only do set operations between two IntervalIndex '
'objects that are closed on the same side')
other = self._as_like_interval_index(other, msg)
result = getattr(self._multiindex, op_name)(other._multiindex)
result_name = self.name if self.name == other.name else None
return type(self).from_tuples(result.values, closed=self.closed,
name=result_name)
return func
union = _setop('union')
intersection = _setop('intersection')
difference = _setop('difference')
    symmetric_difference = _setop('symmetric_difference')
# TODO: arithmetic operations
IntervalIndex._add_logical_methods_disabled()
def interval_range(start=None, end=None, freq=None, periods=None,
name=None, closed='right', **kwargs):
"""
Return a fixed frequency IntervalIndex
Parameters
----------
start : string or datetime-like, default None
Left bound for generating data
end : string or datetime-like, default None
Right bound for generating data
    freq : integer, string or DateOffset, default 1
    periods : integer, default None
name : str, default None
Name of the resulting index
closed : string, default 'right'
options are: 'left', 'right', 'both', 'neither'
Notes
-----
2 of start, end, or periods must be specified
Returns
-------
rng : IntervalIndex
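    Examples
    --------
    A minimal sketch; the repr is abbreviated as in the constructor
    doctests, and note that, like ``np.arange``, ``end`` is not included
    as a breakpoint:
    >>> interval_range(start=0, end=4)
    IntervalIndex(left=[0, 1, 2],
                  right=[1, 2, 3],
                  closed='right')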
"""
if freq is None:
freq = 1
if start is None:
if periods is None or end is None:
raise ValueError("must specify 2 of start, end, periods")
start = end - periods * freq
elif end is None:
if periods is None or start is None:
raise ValueError("must specify 2 of start, end, periods")
end = start + periods * freq
elif periods is None:
if start is None or end is None:
raise ValueError("must specify 2 of start, end, periods")
pass
# must all be same units or None
arr = np.array([start, end, freq])
if is_object_dtype(arr):
raise ValueError("start, end, freq need to be the same type")
return IntervalIndex.from_breaks(np.arange(start, end, freq),
name=name,
closed=closed)
| mit |
JDTimlin/QSO_Clustering | highz_clustering/clustering/Compute_correlation/modified_astroml_code.py | 5 | 7195 | import os
import warnings
import numpy as np
from sklearn.neighbors import BallTree
# Check if scikit-learn's two-point functionality is available.
# This was added in scikit-learn version 0.14
try:
from sklearn.neighbors import KDTree
sklearn_has_two_point = True
except ImportError:
import warnings
sklearn_has_two_point = False
def uniform_sphere(RAlim, DEClim, size=1):
"""Draw a uniform sample on a sphere
Parameters
----------
RAlim : tuple
select Right Ascension between RAlim[0] and RAlim[1]
units are degrees
DEClim : tuple
select Declination between DEClim[0] and DEClim[1]
size : int (optional)
the size of the random arrays to return (default = 1)
Returns
-------
RA, DEC : ndarray
the random sample on the sphere within the given limits.
arrays have shape equal to size.
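    Examples
    --------
    A minimal sketch (the limits are chosen purely for illustration):
    >>> RA, DEC = uniform_sphere((0., 90.), (-10., 10.), size=100)
    >>> RA.shape, DEC.shape
    ((100,), (100,))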
"""
zlim = np.sin(np.pi * np.asarray(DEClim) / 180.)
z = zlim[0] + (zlim[1] - zlim[0]) * np.random.random(size)
DEC = (180. / np.pi) * np.arcsin(z)
RA = RAlim[0] + (RAlim[1] - RAlim[0]) * np.random.random(size)
return RA, DEC
def ra_dec_to_xyz(ra, dec):
"""Convert ra & dec to Euclidean points
Parameters
----------
ra, dec : ndarrays
    Returns
    -------
    x, y, z : ndarrays
"""
sin_ra = np.sin(ra * np.pi / 180.)
cos_ra = np.cos(ra * np.pi / 180.)
sin_dec = np.sin(np.pi / 2 - dec * np.pi / 180.)
cos_dec = np.cos(np.pi / 2 - dec * np.pi / 180.)
return (cos_ra * sin_dec,
sin_ra * sin_dec,
cos_dec)
def angular_dist_to_euclidean_dist(D, r=1):
"""convert angular distances to euclidean distances"""
return 2 * r * np.sin(0.5 * D * np.pi / 180.)
def two_point(data, bins, method='standard',
data_R=None, random_state=None):
"""Two-point correlation function
Parameters
----------
data : array_like
input data, shape = [n_samples, n_features]
bins : array_like
bins within which to compute the 2-point correlation.
shape = Nbins + 1
method : string
"standard" or "landy-szalay".
data_R : array_like (optional)
if specified, use this as the random comparison sample
random_state : integer, np.random.RandomState, or None
specify the random state to use for generating background
Returns
-------
    DD, RR, DR, corr : ndarrays
        the raw pair counts and the estimate of the correlation function
        within each bin; corr has shape Nbins. DR is None when
        method='standard'.
"""
data = np.asarray(data)
bins = np.asarray(bins)
#rng = check_random_state(random_state)
if method not in ['standard', 'landy-szalay']:
raise ValueError("method must be 'standard' or 'landy-szalay'")
if bins.ndim != 1:
raise ValueError("bins must be a 1D array")
if data.ndim == 1:
data = data[:, np.newaxis]
elif data.ndim != 2:
raise ValueError("data should be 1D or 2D")
n_samples, n_features = data.shape
Nbins = len(bins) - 1
# shuffle all but one axis to get background distribution
if data_R is None:
data_R = data.copy()
for i in range(n_features - 1):
            np.random.shuffle(data_R[:, i])
else:
data_R = np.asarray(data_R)
if (data_R.ndim != 2) or (data_R.shape[-1] != n_features):
raise ValueError('data_R must have same n_features as data')
factor = len(data_R) * 1. / len(data)
if sklearn_has_two_point:
# Fast two-point correlation functions added in scikit-learn v. 0.14
KDT_D = KDTree(data)
KDT_R = KDTree(data_R)
counts_DD = KDT_D.two_point_correlation(data, bins)
counts_RR = KDT_R.two_point_correlation(data_R, bins)
else:
warnings.warn("Version 0.3 of astroML will require scikit-learn "
"version 0.14 or higher for correlation function "
"calculations. Upgrade to sklearn 0.14+ now for much "
"faster correlation function calculations.")
BT_D = BallTree(data)
BT_R = BallTree(data_R)
counts_DD = np.zeros(Nbins + 1)
counts_RR = np.zeros(Nbins + 1)
for i in range(Nbins + 1):
counts_DD[i] = np.sum(BT_D.query_radius(data, bins[i],
count_only=True))
counts_RR[i] = np.sum(BT_R.query_radius(data_R, bins[i],
count_only=True))
DD = np.diff(counts_DD)
RR = np.diff(counts_RR)
# check for zero in the denominator
RR_zero = (RR == 0)
RR[RR_zero] = 1
    DR = None  # data-random pair counts; only computed for 'landy-szalay'
    if method == 'standard':
corr = factor ** 2 * DD / RR - 1
elif method == 'landy-szalay':
if sklearn_has_two_point:
counts_DR = KDT_R.two_point_correlation(data, bins)
else:
counts_DR = np.zeros(Nbins + 1)
for i in range(Nbins + 1):
counts_DR[i] = np.sum(BT_R.query_radius(data, bins[i],
count_only=True))
DR = np.diff(counts_DR)
corr = (factor ** 2 * DD - 2 * factor * DR + RR) / RR
corr[RR_zero] = np.nan
return DD,RR,DR,corr
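# Illustrative usage sketch for two_point; the sample sizes and bins below
# are assumed for demonstration only and are not part of the original module:
#
#     rng = np.random.RandomState(0)
#     data = rng.random_sample((500, 2))
#     data_R = rng.random_sample((1000, 2))
#     bins = np.linspace(0.01, 0.5, 11)
#     DD, RR, DR, corr = two_point(data, bins, method='landy-szalay',
#                                  data_R=data_R)
#     # corr has len(bins) - 1 entries, one per separation bin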
def two_point_angular(data, rand, bins, method='standard', random_state=None):
"""Angular two-point correlation function
A separate function is needed because angular distances are not
euclidean, and random sampling needs to take into account the
spherical volume element.
Parameters
----------
    data : array_like, shape = (n_samples, 2)
        input (ra, dec) pairs in degrees
    rand : array_like, shape = (n_random, 2)
        (ra, dec) pairs of the random comparison sample, in degrees
bins : array_like
bins within which to compute the 2-point correlation.
shape = Nbins + 1
method : string
"standard" or "landy-szalay".
random_state : integer, np.random.RandomState, or None
specify the random state to use for generating background
Returns
-------
    DD, RR, DR, corr : ndarrays
        the pair counts and the estimate of the angular correlation
        function within each bin; corr has shape Nbins
"""
#ra = np.asarray(ra)
#dec = np.asarray(dec)
#rng = check_random_state(random_state)
ra = data[:,0]
dec = data[:,1]
if method not in ['standard', 'landy-szalay']:
raise ValueError("method must be 'standard' or 'landy-szalay'")
if bins.ndim != 1:
raise ValueError("bins must be a 1D array")
if (ra.ndim != 1) or (dec.ndim != 1) or (ra.shape != dec.shape):
raise ValueError('ra and dec must be 1-dimensional '
'arrays of the same length')
n_features = len(ra)
Nbins = len(bins) - 1
# draw a random sample with N points
#ra_R, dec_R = uniform_sphere((min(ra), max(ra)),(min(dec), max(dec)),2 * len(ra))
ra_R = rand[:,0]
dec_R = rand[:,1]
data = np.asarray(ra_dec_to_xyz(ra, dec), order='F').T
data_R = np.asarray(ra_dec_to_xyz(ra_R, dec_R), order='F').T
# convert spherical bins to cartesian bins
bins_transform = angular_dist_to_euclidean_dist(bins/60.)
return two_point(data, bins_transform, method=method,
data_R=data_R, random_state=None)
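# Illustrative usage sketch for two_point_angular; the catalogs are assumed
# and the bins are in arcminutes because of the bins / 60. conversion above:
#
#     data = np.column_stack([ra, dec])      # object positions, degrees
#     rand = np.column_stack([ra_R, dec_R])  # random catalog, degrees
#     bins = np.logspace(-1, 2, 11)          # angular bins, arcmin
#     DD, RR, DR, w_theta = two_point_angular(data, rand, bins,
#                                             method='landy-szalay')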
| mit |
has2k1/numpy | numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| bsd-3-clause |
justincassidy/scikit-learn | sklearn/utils/testing.py | 84 | 24860 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
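    Examples
    --------
    A minimal usage sketch; ``deprecated_func`` is an illustrative name:
    >>> import warnings
    >>> def deprecated_func():
    ...     warnings.warn("deprecated", DeprecationWarning)
    ...     return 42
    >>> assert_warns(DeprecationWarning, deprecated_func)
    42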
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
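    Examples
    --------
    A minimal usage sketch; ``noisy_func`` is an illustrative name:
    >>> import warnings
    >>> def noisy_func():
    ...     warnings.warn("divide by zero encountered", RuntimeWarning)
    >>> assert_warns_message(RuntimeWarning, "divide by zero", noisy_func)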
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
        # Check the messages of all warnings that belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
        The expected exception class, or a tuple of exception classes.
    func : callable
        Callable object expected to raise the error.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
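def _example_assert_raise_message():
    # Hedged usage sketch added for illustration; `_raise_value_error` is a
    # made-up helper and is not part of scikit-learn.
    def _raise_value_error(arg):
        raise ValueError("invalid value: %r" % (arg,))
    # Passes because the raised ValueError message contains the expected string.
    assert_raise_message(ValueError, "invalid value", _raise_value_error, 42)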
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
        this object creates a fake dataset in a BytesIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
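def _example_mock_mldata():
    # Hedged usage sketch added for illustration; not part of scikit-learn.
    # Route mldata requests through the in-memory mock while a test runs, then
    # restore the real urlopen. The 'iris' payload below is made up.
    install_mldata_mock({'iris': ({'data': np.arange(6.).reshape(2, 3),
                                   'label': np.arange(2.)},
                                  ['label', 'data'])})
    try:
        pass  # a test would call datasets.fetch_mldata('iris', data_home=...) here
    finally:
        uninstall_mldata_mock()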
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
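def _example_all_estimators():
    # Hedged usage sketch added for illustration; not part of scikit-learn:
    # collect the names of every estimator that implements ClassifierMixin.
    classifiers = all_estimators(type_filter='classifier')
    return [name for name, cls in classifiers]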
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
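def _example_if_matplotlib():
    # Hedged usage sketch added for illustration; not part of scikit-learn.
    # The wrapped test is skipped cleanly when matplotlib cannot be imported.
    @if_matplotlib
    def test_something_visual():
        import matplotlib.pyplot as plt
        plt.plot([0, 1], [0, 1])
    return test_something_visual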
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
    Copy from joblib.pool (for independence)."""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
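def _example_temp_memmap():
    # Hedged usage sketch added for illustration; not part of scikit-learn.
    # Dump an array into a temporary folder and get it back memory-mapped
    # read-only; the folder is removed again when the block exits.
    with TempMemmap(np.arange(10)) as data_read_only:
        return data_read_only.sum()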
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
kiyoto/statsmodels | statsmodels/examples/ex_outliers_influence.py | 34 | 3906 |
from __future__ import print_function
import numpy as np
import statsmodels.stats.outliers_influence as oi
if __name__ == '__main__':
import statsmodels.api as sm
data = np.array('''\
64 57 8
71 59 10
53 49 6
67 62 11
55 51 8
58 50 7
77 55 10
57 48 9
56 42 10
51 42 6
76 61 12
68 57 9'''.split(), float).reshape(-1,3)
varnames = 'weight height age'.split()
endog = data[:,0]
exog = sm.add_constant(data[:,2])
res_ols = sm.OLS(endog, exog).fit()
hh = (res_ols.model.exog * res_ols.model.pinv_wexog.T).sum(1)
x = res_ols.model.exog
hh_check = np.diag(np.dot(x, np.dot(res_ols.model.normalized_cov_params, x.T)))
from numpy.testing import assert_almost_equal
assert_almost_equal(hh, hh_check, decimal=13)
res = res_ols #alias
#http://en.wikipedia.org/wiki/PRESS_statistic
#predicted residuals, leave one out predicted residuals
resid_press = res.resid / (1-hh)
ess_press = np.dot(resid_press, resid_press)
    sigma2_est = np.sqrt(res.mse_resid) #can be replaced by different estimators of sigma
sigma_est = np.sqrt(sigma2_est)
resid_studentized = res.resid / sigma_est / np.sqrt(1 - hh)
#http://en.wikipedia.org/wiki/DFFITS:
dffits = resid_studentized * np.sqrt(hh / (1 - hh))
nobs, k_vars = res.model.exog.shape
#Belsley, Kuh and Welsch (1980) suggest a threshold for abs(DFFITS)
dffits_threshold = 2 * np.sqrt(k_vars/nobs)
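    # Hedged illustrative addition, not in the original example: flag the
    # observations whose |DFFITS| exceeds the threshold computed above.
    dffits_flagged = np.nonzero(np.abs(dffits) > dffits_threshold)[0]
    print('observations exceeding the DFFITS threshold:', dffits_flagged)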
res_ols.df_modelwc = res_ols.df_model + 1
n_params = res.model.exog.shape[1]
#http://en.wikipedia.org/wiki/Cook%27s_distance
cooks_d = res.resid**2 / sigma2_est / res_ols.df_modelwc * hh / (1 - hh)**2
#or
#Eubank p.93, 94
cooks_d2 = resid_studentized**2 / res_ols.df_modelwc * hh / (1 - hh)
#threshold if normal, also Wikipedia
from scipy import stats
alpha = 0.1
#df looks wrong
print(stats.f.isf(1-alpha, n_params, res.df_resid))
print(stats.f.sf(cooks_d, n_params, res.df_resid))
print('Cooks Distance')
print(cooks_d)
print(cooks_d2)
doplot = 0
if doplot:
import matplotlib.pyplot as plt
fig = plt.figure()
# ax = fig.add_subplot(3,1,1)
# plt.plot(andrew_results.weights, 'o', label='rlm weights')
# plt.legend(loc='lower left')
ax = fig.add_subplot(3,1,2)
plt.plot(cooks_d, 'o', label="Cook's distance")
plt.legend(loc='upper left')
ax2 = fig.add_subplot(3,1,3)
plt.plot(resid_studentized, 'o', label='studentized_resid')
plt.plot(dffits, 'o', label='DFFITS')
leg = plt.legend(loc='lower left', fancybox=True)
leg.get_frame().set_alpha(0.5) #, fontsize='small')
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize='small') # the legend text fontsize
print(oi.reset_ramsey(res, degree=3))
#note, constant in last column
for i in range(1):
print(oi.variance_inflation_factor(res.model.exog, i))
infl = oi.OLSInfluence(res_ols)
print(infl.resid_studentized_external)
print(infl.resid_studentized_internal)
print(infl.summary_table())
print(oi.summary_table(res, alpha=0.05)[0])
'''
>>> res.resid
array([ 4.28571429, 4. , 0.57142857, -3.64285714,
-4.71428571, 1.92857143, 10. , -6.35714286,
-11. , -1.42857143, 1.71428571, 4.64285714])
>>> infl.hat_matrix_diag
array([ 0.10084034, 0.11764706, 0.28571429, 0.20168067, 0.10084034,
0.16806723, 0.11764706, 0.08403361, 0.11764706, 0.28571429,
0.33613445, 0.08403361])
>>> infl.resid_press
array([ 4.76635514, 4.53333333, 0.8 , -4.56315789,
-5.24299065, 2.31818182, 11.33333333, -6.94036697,
-12.46666667, -2. , 2.58227848, 5.06880734])
>>> infl.ess_press
465.98646628086374
'''
| bsd-3-clause |
dchad/malware-detection | vs/feature_extraction_packer_id.py | 1 | 7055 | # Generate packer id values for PE files.
#
# Inputs: av-packer-id-labels.csv (scalar labels for PE packer types.)
# userdb-sans.txt (PEid.exe PE packer database.)
#
# Output: xxxx-sorted-packer-id-features.csv ( 4 x feature files )
# row format = [file_name, packer_name, packer_id, valid_pe, is_packed]
#
# Combined into one feature file:
# sorted-packer-id-features.csv
# row format = [file_name, packer_name, packer_id, valid_pe, is_packed]
#
#
# Author: Derek Chadwick
# Date : 04/08/2016
from multiprocessing import Pool
import os
import peutils
import pefile
import sys
import re
import pandas as pd
def load_packer_id_map():
# Load the packer ID scalar labels and create a map. There are a lot of duplicate names so the total is less than
# the number of packers listed in the signature db.
packer_id_map = {}
counter = 0
fip = open('data/av-packer-id-labels.csv','r')
in_lines = fip.readlines()
for idx in range(1,len(in_lines)):
tokens = in_lines[idx].split(',')
packer_name = tokens[0]
if packer_name not in packer_id_map.keys():
packer_id_map[packer_name] = int(tokens[1])
counter += 1
fip.close()
print('Completed {:d} packer IDs.'.format(counter))
return packer_id_map
def sort_and_save_packer_id_feature_file():
# Load in the combined feature files, sort and save.
# NOTE: add a file name argument so the final filename can be
# specified during runs on multiple datasets.
packers = pd.read_csv('data/packer-id-features.csv')
# DataFrame.sort() is deprecated, but this is an old version of pandas, does not have sort_values().
sorted_packers = packers.sort('file_name')
sorted_packers.to_csv('data/sorted-packer-id-features.csv', index=False)
sorted_packers.head(20)
return
def combine_packer_id_files():
# Function to combine the four packer id files in one file
# 1. list data directory
# 2. For each file in file list that matches (\d\d\d\d-packer-id-features.csv)
# 3. Trim the filenames if necessary (should remove VirusShare_ prefix).
# 4. Concatenate the unsorted packer id feature files.
# 5. Sort and write to data/sorted-packer-id-features.csv
# NOTE: add a file name argument so the final filename can be
# specified during runs on multiple datasets.
fop = open('data/packer-id-features.csv','w')
fop.write('file_name,packer_name,packer_id,valid_pe,is_packed\n')
p1 = re.compile('\d{3,5}-sorted-packer-id-features.csv') # This is the PID prefix for each file.
file_list = os.listdir('data/')
counter = 0
for file_name in file_list:
if p1.match(file_name):
fip = open('data/' + file_name, 'r')
in_lines = fip.readlines()
#if counter > 0:
# in_lines = in_lines[1:] # skip the column header row
fop.writelines(in_lines)
counter += len(in_lines)
fip.close()
print('Completed combine of {:d} packer ID features.'.format(counter))
fop.close()
sort_and_save_packer_id_feature_file()
return
def generate_sample_packer_id(file_list):
# Generate scalar packer IDs for each sample.
pid = os.getpid()
file_name = "data/" + str(pid) + "-sorted-packer-id-features.csv"
fop = open(file_name,'w')
#fop.write('file_name,packer_type,label,is_valid,is_packed\n') put column headers in during the combine stage.
out_lines = []
packer_id_map = load_packer_id_map()
signatures = peutils.SignatureDatabase('data/userdb-sans.txt')
non_pe_counter = 0
pe_file_counter = 0
exception_counter = 0
signat = 'unknown'
error_str = 'none'
for idx, file_name in enumerate(file_list):
tokens = file_name.split('_')
truncated_file_name = tokens[1] # remove the VirusShare_ prefix from the filename.
matches = None
packer_id = 0
is_valid = 0
is_packed = 0
try:
pe = pefile.PE(ext_drive + file_name, fast_load=True)
pe_file_counter += 1
#matches = signatures.match_all(pe, ep_only = True)
is_valid = 1
try:
if peutils.is_probably_packed(pe): # NOTE: peutils.is_valid() has not been implemented yet.
#is_valid = 1
is_packed = 1
matches = signatures.match(pe, ep_only = True)
signat = matches[0]
if (signat in packer_id_map.keys()):
packer_id = packer_id_map[signat]
else:
packer_id = 0
#signat = signat.replace(',','') # remove commas or they will cause an error when loading dataframes.
# NOTE: If the signature database has commas in the packer name then remove them or they will
# cause problems later on when loading the dataframes.
row = truncated_file_name + "," + signat + "," + str(packer_id) + "," + str(is_valid) + "," + str(is_packed) + "\n"
except:
signat = ",unknown,0," + str(is_valid) + "," + str(is_packed) + "\n"
row = truncated_file_name + signat
pe.close()
except Exception as e:
error_str = str(e)
non_pe_counter += 1
error_str = error_str.replace(',','') # remove commas or they will cause an error when loading dataframes.
signat = "," + error_str + ",0,0,0\n"
row = truncated_file_name + signat
out_lines.append(row)
if (idx % 1000) == 0: # print progress
fop.writelines(out_lines)
out_lines = []
print('{:s} - {:s} - {:d} - {:s}'.format(str(pid),truncated_file_name,idx,signat))
if len(out_lines) > 0:
fop.writelines(out_lines)
out_lines = []
fop.close()
print('{:s} - Completed {:d} non PE files and {:d} PE files.'.format(str(pid), non_pe_counter, pe_file_counter))
return
# Start of script
# TODO: add command line arguments to specify input files.
#ext_drive = '/opt/vs/train1/'
#ext_drive = '/opt/vs/train2/'
#ext_drive = '/opt/vs/train3/'
ext_drive = '/opt/vs/train4/'
#ext_drive = '/opt/vs/apt/'
tfiles = os.listdir(ext_drive)
quart = len(tfiles)/4
train1 = tfiles[:quart]
train2 = tfiles[quart:(2*quart)]
train3 = tfiles[(2*quart):(3*quart)]
train4 = tfiles[(3*quart):]
print("Files({:s}): {:d} - {:d} - {:d}".format(ext_drive, len(tfiles), quart, (len(train1)+len(train2)+len(train3)+len(train4))))
trains = [train1, train2, train3, train4]
p = Pool(4)
p.map(generate_sample_packer_id, trains)
print('Completed processing {:d} files in {:s}.'.format(len(tfiles), ext_drive))
combine_packer_id_files()
| gpl-3.0 |
eichstaedtPTB/PyDynamic | test/test_propagate_MonteCarlo.py | 1 | 5217 | # -*- coding: utf-8 -*-
""" Perform tests on the method *uncertainty.propagate_MonteCarlo*"""
import numpy as np
from pytest import raises
import functools
import scipy
from PyDynamic.misc.testsignals import rect
from PyDynamic.misc.tools import make_semiposdef
from PyDynamic.misc.filterstuff import kaiser_lowpass
#from PyDynamic.misc.noise import power_law_acf, power_law_noise, white_gaussian, ARMA
from PyDynamic.uncertainty.propagate_MonteCarlo import MC, SMC, UMC, ARMA, UMC_generic, _UMCevaluate
import matplotlib.pyplot as plt
##### some definitions for all tests
# parameters of simulated measurement
Fs = 100e3 # sampling frequency (in Hz)
Ts = 1 / Fs # sampling interval length (in s)
# nominal system parameters
fcut = 20e3 # low-pass filter cut-off frequency (6 dB)
L = 100 # filter order
b1 = kaiser_lowpass(L, fcut,Fs)[0]
b2 = kaiser_lowpass(L-20,fcut,Fs)[0]
# uncertain knowledge: cutoff between 19.5kHz and 20.5kHz
runs = 20
FC = fcut + (2*np.random.rand(runs)-1)*0.5e3
B = np.zeros((runs,L+1))
for k in range(runs): # Monte Carlo for filter coefficients of low-pass filter
B[k,:] = kaiser_lowpass(L,FC[k],Fs)[0]
Ub = make_semiposdef(np.cov(B,rowvar=0)) # covariance matrix of MC result
# simulate input and output signals
nTime = 500
time = np.arange(nTime)*Ts # time values
# different cases
sigma_noise = 1e-5
# input signal + run methods
x = rect(time,100*Ts,250*Ts,1.0,noise=sigma_noise)
##### actual tests
def test_MC(visualizeOutput=False):
# run method
y,Uy = MC(x,sigma_noise,b1,[1.0],Ub,runs=runs,blow=b2)
assert len(y) == len(x)
assert Uy.shape == (x.size, x.size)
if visualizeOutput:
# visualize input and mean of system response
plt.plot(time, x)
plt.plot(time, y)
# visualize uncertainty of output
plt.plot(time, y - np.sqrt(np.diag(Uy)), linestyle="--", linewidth=1, color="red")
plt.plot(time, y + np.sqrt(np.diag(Uy)), linestyle="--", linewidth=1, color="red")
plt.show()
# this does not run through yet
#def test_SMC():
# # run method
# y,Uy = SMC(x, sigma_noise, b1, [1.0], Ub, runs=runs)
#
# assert len(y) == len(x)
# assert Uy.shape == (x.size, x.size)
def test_UMC(visualizeOutput=False):
# run method
y, Uy, p025, p975, happr = UMC(x, b1, [1.0], Ub, blow=b2, sigma=sigma_noise, runs=runs, runs_init=10, nbins=10)
assert len(y) == len(x)
assert Uy.shape == (x.size, x.size)
assert p025.shape[1] == len(x)
assert p975.shape[1] == len(x)
assert isinstance(happr, dict)
if visualizeOutput:
# visualize input and mean of system response
plt.plot(time, x)
plt.plot(time, y)
# visualize uncertainty of output
plt.plot(time, y - np.sqrt(np.diag(Uy)), linestyle="--", linewidth=1, color="red")
plt.plot(time, y + np.sqrt(np.diag(Uy)), linestyle="--", linewidth=1, color="red")
# visualize central 95%-quantile
plt.plot(time, p025.T, linestyle=":", linewidth=1, color="gray")
plt.plot(time, p975.T, linestyle=":", linewidth=1, color="gray")
# visualize the bin-counts
key = list(happr.keys())[0]
for ts, be, bc in zip(time, happr[key]["bin-edges"].T, happr[key]["bin-counts"].T):
plt.scatter(ts*np.ones_like(bc), be[1:], bc)
plt.show()
def test_UMC_generic(visualizeOutput=False):
x_shape = (5,6,7)
draw_samples = lambda size: np.random.rand(size, *x_shape)
evaluate = functools.partial(np.mean, axis=1)
# run UMC
y, Uy, happr, output_shape = UMC_generic(draw_samples, evaluate, runs=100, blocksize=20, runs_init=10)
assert y.size == Uy.shape[0]
assert Uy.shape == (y.size, y.size)
assert isinstance(happr, dict)
assert output_shape == (5,7)
# run without parallel computation
y, Uy, happr, output_shape = UMC_generic(draw_samples, evaluate, runs=100, blocksize=20, runs_init=10, n_cpu=1)
assert y.size == Uy.shape[0]
assert Uy.shape == (y.size, y.size)
assert isinstance(happr, dict)
assert output_shape == (5,7)
# run again, but only return all simulations
y, Uy, happr, output_shape, sims = UMC_generic(draw_samples, evaluate, runs=100, blocksize=20, runs_init=10, return_samples=True)
assert y.size == Uy.shape[0]
assert Uy.shape == (y.size, y.size)
assert isinstance(happr, dict)
assert output_shape == (5,7)
assert isinstance(sims, dict)
assert sims["samples"][0].shape == x_shape
assert sims["results"][0].shape == output_shape
def test_compare_MC_UMC():
np.random.seed(12345)
y_MC, Uy_MC = MC(x,sigma_noise,b1,[1.0],Ub,runs=2*runs,blow=b2)
y_UMC, Uy_UMC, _, _, _ = UMC(x, b1, [1.0], Ub, blow=b2, sigma=sigma_noise, runs=2*runs, runs_init=10)
# both methods should yield roughly the same results
assert np.allclose(y_MC, y_UMC, atol=5e-4)
assert np.allclose(Uy_MC, Uy_UMC, atol=5e-4)
def test_noise_ARMA():
length = 100
phi = [1/3, 1/4, 1/5]
theta = [1, -1 ]
e = ARMA(length, phi = phi, theta = theta)
assert len(e) == length
| lgpl-3.0 |
cainiaocome/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
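# Hedged illustrative aside, added for clarity and not part of the original
# example: cosine distance ignores an overall scaling of the data, so a vector
# and three times that vector sit at cosine distance ~0, while their euclidean
# distance grows with the scale factor. (Uses a fixed vector so the random
# waveforms below are unaffected.)
_v = np.arange(1.0, 11.0).reshape(1, -1)
assert pairwise_distances(_v, 3 * _v, metric='cosine')[0, 0] < 1e-10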
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
lapierreM/Yoruba_demography | Programs/plot/plot_SFS_fitting.py | 1 | 3207 | #!/anaconda/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016 Lapierre/Marguerite
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License
as published by the Free Software Foundation; either version 2.1
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
for more information, please contact: [email protected]
"""
import re
import sys
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.ticker import MultipleLocator
from matplotlib.font_manager import FontProperties
#------------------------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------------------------
#
#Read parameter values in file
#
with open(sys.argv[1],"r") as file_params:
for line in file_params:
list=line.split('=')
if list[0][:5]=='clear':
clear=int(list[1])
elif list[0][:8]=='fit_file':
fit_file=re.sub(r'"','',(str(list[1][:-1]).strip()))
elif list[0][:11]=='figure_name':
figure_name=re.sub(r'"','',(list[1][:-1].strip()))
t=[]
t_BD=[]
BD=[]
lK=[]
cK=[]
bnK=[]
eK=[]
#
# plot parameters and colors
#
minorLocator = MultipleLocator(0.1)
gris=(127/255.0,127/255.0,127/255.0)
violet=(110/255.0,16/255.0,144/255.0)
vert=(110/255.0,192/255.0,56/255.0)
jaune=(226/255.0,184/255.0,1/255.0)
bleu=(54/255.0,125/255.0,162/255.0)
rouge=(207/255.0,35/255.0,43/255.0)
#
# Read the .fit file
#
with open(fit_file,"r") as file_fit:
for ligne in file_fit:
liste=re.split(' |\n',ligne)
if ligne[0]!='#' and len(liste)==7:
t.append(liste[0])
BD.append(liste[1])
lK.append(liste[2])
cK.append(liste[3])
bnK.append(liste[4])
eK.append(float(liste[5]))
t_ek=[1/float(x) for x in t]
t_BD=[float(x)*2 for x in t] #time scaling 2 to put Birth-Death on the same time scale as Kingman models
rcParams.update({'font.size': 18})
FontProperties().set_family('sans-serif')
plt.figure(1)
plt.plot(t_BD,BD,color=rouge,label="Birth-death",linewidth=2)
plt.plot(t,lK,color=vert,label="Linear Kingman",linewidth=2)
plt.plot(t,cK,color=bleu,label="Conditionned Kingman",linewidth=2)
plt.plot(t,bnK,color=violet,label="Bottleneck Kingman",linewidth=2)
plt.plot(t_ek,eK,color=jaune,label="Exponential Kingman",linewidth=2)
plt.yscale('log')
#plt.xlim([0.8,3.0]) #if specific bounds for the x axis are needed
ax = plt.gca()
ax.xaxis.set_minor_locator(minorLocator)
ax.tick_params(width=1, length=7,which='major')
ax.tick_params(width=1, length=4,which='minor')
ax.get_xaxis().tick_bottom() # remove unneeded ticks
ax.get_yaxis().tick_left()
plt.savefig(figure_name,format='eps')
plt.show()
| lgpl-2.1 |
evan-magnusson/dynamic | Data/Calibration/Firm_Calibration_Python/parameters/employment/processing/read_wages_data.py | 6 | 4160 | '''
-------------------------------------------------------------------------------
Date created: 5/22/2015
Last updated 5/22/2015
-------------------------------------------------------------------------------
-------------------------------------------------------------------------------
Packages:
-------------------------------------------------------------------------------
'''
import os.path
import sys
sys.path.append(os.path.abspath("N:\Lott, Sherwin\Other Calibration\Program"))
import numpy as np
import pandas as pd
import xlrd
#
import naics_processing as naics
'''
-------------------------------------------------------------------------------
-------------------------------------------------------------------------------
'''
# Defining constant names:
WAGES = "WAGES"
#
def load_nipa_wages_ind(data_folder,tree = None):
wages_ind_file = os.path.abspath(data_folder + "\\Wages--Industry.xls")
wages_ind_cross_file = os.path.abspath(data_folder + "\\Wages--Industry_Crosswalk.csv")
#
data = load_nipa_ind(wages_ind_file, wages_ind_cross_file)
data.columns = ["NAICS_Code", WAGES]
#
conversion_factor = 1.0
for i in xrange(0, data.shape[0]):
data[WAGES][i] *= conversion_factor
if tree == None:
return data
naics_data_to_tree(tree, data, WAGES)
def load_nipa_ind(data_file, cross_file):
#data_folder = "N:\\Lott, Sherwin\\Other Calibration\\Program\\national_income\\data"
data_book = xlrd.open_workbook(data_file)
data_sht = data_book.sheet_by_index(0)
#
data_cross = pd.read_csv(cross_file)
#data_cross = data_cross.fillna(-1)
#data_cross = pd.DataFrame(data_cross[data_cross["NAICS Code:"] != -1])
output = np.zeros(data_cross.shape[0])
start_pos = naics.search_ws(data_sht, "Line", 25, True, [0,0], True)
for i in xrange(start_pos[0]+1, data_sht.nrows):
if(str(data_sht.cell_value(i,start_pos[1])) == "1"):
start_pos[0] = i
break
cur_row = start_pos[0]
ind_col = start_pos[1] + 1
data_col = data_sht.ncols - 1
for i in xrange(0, data_sht.ncols):
try:
float(data_sht.cell_value(cur_row, data_col))
break
except ValueError:
data_col -= 1
for i in xrange(0, data_cross.shape[0]):
for j in xrange(start_pos[0], data_sht.nrows):
try:
if(data_cross["Industry"][i] in data_sht.cell_value(cur_row, ind_col)):
output[i] = data_sht.cell_value(cur_row, data_col)
cur_row = start_pos[0] + ((cur_row+1-start_pos[0]) % (data_sht.nrows-start_pos[0]))
break
cur_row = start_pos[0] + ((cur_row+1-start_pos[0]) % (data_sht.nrows-start_pos[0]))
except ValueError:
cur_row = start_pos[0] + ((cur_row+1-start_pos[0]) % (data_sht.nrows-start_pos[0]))
return pd.DataFrame(np.column_stack((data_cross["NAICS_Code"], output)), columns = ["NAICS Codes:", ""])
def naics_data_to_tree(tree, df, df_name = "", bp_tree = None, bp_df = None):
#
for i in tree.enum_inds:
i.append_dfs((df_name, pd.DataFrame(np.zeros((1,len(df.columns[1:]))),
columns = df.columns[1:])))
#
enum_index = 0
#
for i in xrange(0, len(tree.enum_inds)):
cur_ind = tree.enum_inds[i]
cur_dfs = cur_ind.data.dfs
tot_share = 0
for j in xrange(0, df.shape[0]):
if df["NAICS_Code"][j] != df["NAICS_Code"][j]:
continue
df_code = df["NAICS_Code"][j]
df_code = df_code.split(".")
cur_share = naics.compare_codes(df_code, cur_dfs["Codes:"].iloc[:,0])
if cur_share == 0:
continue
tot_share += cur_share
#
for k in xrange(1, df.shape[1]):
cur_dfs[df_name].iloc[0,k-1] = df.iloc[j,k] #Removed cur_share
#
if tot_share == 1:
break
enum_index = (enum_index+1) % len(tree.enum_inds)
| mit |
richford/AFQ-viz | afqbrowser/tests/test_browser.py | 3 | 1236 | import os.path as op
import afqbrowser as afqb
import tempfile
import json
import pandas as pd
import numpy.testing as npt
def test_assemble():
data_path = op.join(afqb.__path__[0], 'site')
tdir = tempfile.mkdtemp()
afqb.assemble(op.join(data_path, 'client', 'data', 'afq.mat'),
target=tdir, title='', subtitle='', link='', sublink='')
    # Check for regression against known results:
out_data = op.join(tdir, 'AFQ-browser', 'client', 'data')
params_file = op.join(out_data, 'params.json')
params = json.loads(open(params_file).read())
npt.assert_equal(params['analysis_params']['track']['stepSizeMm'], 1)
nodes_file = op.join(out_data, 'nodes.csv')
nodes = pd.read_csv(nodes_file)
npt.assert_almost_equal(nodes['fa'][0], 0.4529922120694605)
def test_tracula():
data_path = op.join(afqb.__path__[0], 'site', 'client',
'data', 'tracula_data')
stats_dir = op.join(data_path, 'stats')
tdir = tempfile.mkdtemp()
nodes_fname, meta_fname, streamlines_fname, params_fname =\
afqb.tracula2nodes(stats_dir, out_path=tdir)
# Test for regressions:
nodes = pd.read_csv(nodes_fname)
npt.assert_equal(nodes.shape, (2643, 11))
| bsd-3-clause |
wathen/PhD | MHD/FEniCS/CGns/da_serial.py | 1 | 2866 | # Summary
# Basic use of distributed arrays communication data structures in PETSc.
#
# Examples
# Direct solve:
# $ python da_serial.py -ksp_monitor -ksp_type preonly -pc_type lu
#
# Iterative solve:
# $ python da_serial.py -ksp_monitor -ksp_type bcgs
#
# Description
# DAs are extremely useful when working simulations that are discretized
# on a structured grid. DAs don't actually hold data; instead, they are
# templates for distributing and communicating information (matrices and
# vectors) across a parallel system.
#
# In this example, we set up a simple 2D wave equation with mirror
# boundary conditions. The solution, given a source at the center of the
# grid, is solved using a ksp object.
#
# Note that this example is uniprocessor only, so there is nothing
# "distributed" about the DA. Use this as a stepping stone to working with
# DAs in a parallel setting.
#
# For more information, consult the PETSc user manual.
# Also, look at the petsc4py/src/PETSc/DA.pyx file.
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
from matplotlib import pylab
# Dimensions of the 2D grid.
nx = 101
ny = 101
w = 2./10. # Angular frequency of wave (2*pi / period).
# Create the DA.
da = PETSc.DA().create([nx, ny], \
stencil_width=1, \
boundary_type=('ghosted', 'ghosted'))
# Create the rhs vector based on the DA.
b = da.createGlobalVec()
b_val = da.getVecArray(b) # Obtain access to elements of b.
b_val[50, 50] = 1; # Set central value to 1.
# Create (a vector to store) the solution vector.
x = da.createGlobalVec()
# Create the matrix.
A = da.getMatrix('aij')
# Stencil objects make it easy to set the values of the matrix elements.
row = PETSc.Mat.Stencil()
col = PETSc.Mat.Stencil()
# Set matrix elements to correct values.
(i0, i1), (j0, j1) = da.getRanges()
for j in range(j0, j1):
for i in range(i0, i1):
row.index = (i, j)
for index, value in [((i, j), -4 + w**2),
((i-1, j), 1),
((i+1, j), 1),
((i, j-1), 1),
((i, j+1), 1)]:
col.index = index
A.setValueStencil(row, col, value) # Sets a single matrix element.
A.assemblyBegin() # Make matrices useable.
A.assemblyEnd()
# Initialize ksp solver.
ksp = PETSc.KSP().create()
ksp.setOperators(A)
# Allow for solver choice to be set from command line with -ksp_type <solver>.
# Recommended option: -ksp_type preonly -pc_type lu
ksp.setFromOptions()
print 'Solving with:', ksp.getType()
# Solve!
ksp.solve(b, x)
# Plot solution, which is wave-like, although boundaries cause reflections.
pylab.contourf(da.getVecArray(x)[:])
pylab.show()
| mit |
correlator/spinmob | _pylab_tweaks.py | 2 | 55292 | import os as _os
import pylab as _pylab
import time as _time
import thread as _thread
import matplotlib as _mpl
import numpy as _n
import _functions as _fun
import _pylab_colormap
import spinmob as _s
image_colormap = _pylab_colormap.colormap_interface
from matplotlib.font_manager import FontProperties as _FontProperties
if __name__ == '__main__':
import _settings
_settings = _settings.settings()
line_attributes = ["linestyle","linewidth","color","marker","markersize","markerfacecolor","markeredgewidth","markeredgecolor"]
image_undo_list = []
def add_text(text, x=0.01, y=0.01, axes="gca", draw=True, **kwargs):
"""
Adds text to the axes at the specified position.
**kwargs go to the axes.text() function.
"""
if axes=="gca": axes = _pylab.gca()
axes.text(x, y, text, transform=axes.transAxes, **kwargs)
if draw: _pylab.draw()
def auto_zoom(zoomx=1, zoomy=1, axes="gca", x_space=0.04, y_space=0.04, draw=True):
"""
Looks at the bounds of the plotted data and zooms accordingly, leaving some
space around the data.
"""
_pylab.ioff()
if axes=="gca": axes = _pylab.gca()
a = axes
# get all the lines
lines = a.get_lines()
# get the current limits, in case we're not zooming one of the axes.
x1, x2 = a.get_xlim()
y1, y2 = a.get_ylim()
xdata = []
ydata = []
for n in range(0,len(lines)):
# store this line's data
# build up a huge data array
if isinstance(lines[n], _mpl.lines.Line2D):
x, y = lines[n].get_data()
for n in range(len(x)):
# if we're not zooming x and we're in range, append
if not zoomx and x[n] >= x1 and x[n] <= x2:
xdata.append(x[n])
ydata.append(y[n])
elif not zoomy and y[n] >= y1 and y[n] <= y2:
xdata.append(x[n])
ydata.append(y[n])
elif zoomy and zoomx:
xdata.append(x[n])
ydata.append(y[n])
if len(xdata):
xmin = min(xdata)
xmax = max(xdata)
ymin = min(ydata)
ymax = max(ydata)
# we want a 3% white space boundary surrounding the data in our plot
# so set the range accordingly
if zoomx: a.set_xlim(xmin-x_space*(xmax-xmin), xmax+x_space*(xmax-xmin))
if zoomy: a.set_ylim(ymin-y_space*(ymax-ymin), ymax+y_space*(ymax-ymin))
if draw:
_pylab.ion()
_pylab.draw()
else:
return
def click_estimate_slope():
"""
Takes two clicks and returns the slope.
Right-click aborts.
"""
c1 = _pylab.ginput()
if len(c1)==0:
return None
c2 = _pylab.ginput()
if len(c2)==0:
return None
return (c1[0][1]-c2[0][1])/(c1[0][0]-c2[0][0])
def click_estimate_curvature():
"""
Takes two clicks and returns the curvature, assuming the first click
was the minimum of a parabola and the second was some other point.
Returns the second derivative of the function giving this parabola.
Right-click aborts.
"""
c1 = _pylab.ginput()
if len(c1)==0:
return None
c2 = _pylab.ginput()
if len(c2)==0:
return None
return 2*(c2[0][1]-c1[0][1])/(c2[0][0]-c1[0][0])**2
def click_estimate_difference():
"""
Takes two clicks and returns the difference vector [dx, dy].
Right-click aborts.
"""
c1 = _pylab.ginput()
if len(c1)==0:
return None
c2 = _pylab.ginput()
if len(c2)==0:
return None
return [c2[0][0]-c1[0][0], c2[0][1]-c1[0][1]]
def differentiate_shown_data(neighbors=1, fyname=1, **kwargs):
"""
Differentiates the data visible on the specified axes using
fun.derivative_fit() (if neighbors > 0), and derivative() otherwise.
Modifies the visible data using manipulate_shown_data(**kwargs)
"""
if neighbors:
def D(x,y): return _fun.derivative_fit(x,y,neighbors)
else:
def D(x,y): return _fun.derivative(x,y)
if fyname==1: fyname = '$\\partial_{x(\\pm'+str(neighbors)+')}$'
manipulate_shown_data(D, fxname=None, fyname=fyname, **kwargs)
def format_figure(figure=None, tall=False, draw=True):
"""
This formats the figure in a compact way with (hopefully) enough useful
information for printing large data sets. Used mostly for line and scatter
plots with long, information-filled titles.
Chances are somewhat slim this will be ideal for you but it very well might
and is at least a good starting point.
figure=None specify a figure object. None will use gcf()
"""
_pylab.ioff()
if figure == None: figure = _pylab.gcf()
if tall: set_figure_window_geometry(figure, (0,0), (550,700))
else: set_figure_window_geometry(figure, (0,0), (550,400))
legend_position=1.01
# first, find overall bounds of all axes.
ymin = 1.0
ymax = 0.0
xmin = 1.0
xmax = 0.0
for axes in figure.get_axes():
(x,y,dx,dy) = axes.get_position().bounds
if y < ymin: ymin = y
if y+dy > ymax: ymax = y+dy
if x < xmin: xmin = x
if x+dx > xmax: xmax = x+dx
# Fraction of the figure's width and height to use for all the plots.
w = 0.55
h = 0.75
# buffers on left and bottom edges
bb = 0.12
bl = 0.12
xscale = w / (xmax-xmin)
yscale = h / (ymax-ymin)
# save this for resetting
current_axes = _pylab.gca()
# loop over the axes
for axes in figure.get_axes():
(x,y,dx,dy) = axes.get_position().bounds
y = bb + (y-ymin)*yscale
dy = dy * yscale
x = bl + (x-xmin)*xscale
dx = dx * xscale
axes.set_position([x,y,dx,dy])
# set the position of the legend
_pylab.axes(axes) # set the current axes
if len(axes.lines)>0:
_pylab.legend(loc=[legend_position, 0], borderpad=0.02, prop=_FontProperties(size=7))
# set the label spacing in the legend
if axes.get_legend():
axes.get_legend().labelsep = 0.01
axes.get_legend().set_visible(1)
# set up the title label
axes.title.set_horizontalalignment('right')
axes.title.set_size(8)
axes.title.set_position([1.5,1.02])
axes.title.set_visible(1)
#axes.yaxis.label.set_horizontalalignment('center')
#axes.xaxis.label.set_horizontalalignment('center')
_pylab.axes(current_axes)
if draw:
_pylab.ion()
_pylab.draw()
def get_figure_window(figure='gcf'):
"""
This will search through the windows and return the one containing the figure
"""
if figure == 'gcf': figure = _pylab.gcf()
return figure.canvas.GetParent()
def get_figure_window_geometry(fig='gcf'):
"""
This will currently only work for Qt4Agg and WXAgg backends.
Returns position, size
postion = [x, y]
size = [width, height]
fig can be 'gcf', a number, or a figure object.
"""
if type(fig)==str: fig = _pylab.gcf()
elif _fun.is_a_number(fig): fig = _pylab.figure(fig)
# Qt4Agg backend. Probably would work for other Qt stuff
if _pylab.get_backend().find('Qt') >= 0:
size = fig.canvas.window().size()
pos = fig.canvas.window().pos()
return [[pos.x(),pos.y()], [size.width(),size.height()]]
else:
print "get_figure_window_geometry() only implemented for QtAgg backend."
return None
def image_format_figure(figure=None, draw=True):
"""
This formats the figure in a compact way with (hopefully) enough useful
information for printing large data sets. Used mostly for line and scatter
plots with long, information-filled titles.
Chances are somewhat slim this will be ideal for you but it very well might
and is at least a good starting point.
figure=None specify a figure object. None will use gcf()
"""
_pylab.ioff()
if figure == None: figure = _pylab.gcf()
set_figure_window_geometry(figure, (0,0), (550,470))
axes = figure.axes[0]
# set up the title label
axes.title.set_horizontalalignment('right')
axes.title.set_size(8)
axes.title.set_position([1.27,1.02])
axes.title.set_visible(1)
if draw:
_pylab.ion()
_pylab.draw()
def impose_legend_limit(limit=30, axes="gca", **kwargs):
"""
This will erase all but, say, 30 of the legend entries and remake the legend.
You'll probably have to move it back into your favorite position at this point.
"""
if axes=="gca": axes = _pylab.gca()
# make these axes current
_pylab.axes(axes)
    # loop over all the lines.
for n in range(0,len(axes.lines)):
if n > limit-1 and not n==len(axes.lines)-1: axes.lines[n].set_label("_nolegend_")
if n == limit-1 and not n==len(axes.lines)-1: axes.lines[n].set_label("...")
_pylab.legend(**kwargs)
def image_autozoom(axes="gca"):
if axes=="gca": axes = _pylab.gca()
# get the extent
extent = axes.images[0].get_extent()
# rezoom us
axes.set_xlim(extent[0],extent[1])
axes.set_ylim(extent[2],extent[3])
_pylab.draw()
def image_coarsen(xlevel=0, ylevel=0, image="auto", method='average'):
"""
This will coarsen the image data by binning each xlevel+1 along the x-axis
and each ylevel+1 points along the y-axis
type can be 'average', 'min', or 'max'
"""
if image == "auto": image = _pylab.gca().images[0]
Z = _n.array(image.get_array())
# store this image in the undo list
global image_undo_list
image_undo_list.append([image, Z])
if len(image_undo_list) > 10: image_undo_list.pop(0)
# images have transposed data
image.set_array(_fun.coarsen_matrix(Z, ylevel, xlevel, method))
# update the plot
_pylab.draw()
def image_neighbor_smooth(xlevel=0.2, ylevel=0.2, image="auto"):
"""
This will bleed nearest neighbor pixels into each other with
the specified weight factors.
"""
if image == "auto": image = _pylab.gca().images[0]
Z = _n.array(image.get_array())
# store this image in the undo list
global image_undo_list
image_undo_list.append([image, Z])
if len(image_undo_list) > 10: image_undo_list.pop(0)
    # get the diagonal smoothing level (elliptical, and scaled down by distance)
dlevel = ((xlevel**2+ylevel**2)/2.0)**(0.5)
# don't touch the first column
new_Z = [Z[0]*1.0]
for m in range(1,len(Z)-1):
new_Z.append(Z[m]*1.0)
for n in range(1,len(Z[0])-1):
new_Z[-1][n] = (Z[m,n] + xlevel*(Z[m+1,n]+Z[m-1,n]) + ylevel*(Z[m,n+1]+Z[m,n-1]) \
+ dlevel*(Z[m+1,n+1]+Z[m-1,n+1]+Z[m+1,n-1]+Z[m-1,n-1]) ) \
/ (1.0+xlevel*2+ylevel*2 + dlevel*4)
# don't touch the last column
new_Z.append(Z[-1]*1.0)
# images have transposed data
image.set_array(_n.array(new_Z))
# update the plot
_pylab.draw()
def image_undo():
"""
Undoes the last coarsen or smooth command.
"""
if len(image_undo_list) <= 0:
print "no undos in memory"
return
[image, Z] = image_undo_list.pop(-1)
image.set_array(Z)
_pylab.draw()
def image_set_aspect(aspect=1.0, axes="gca"):
"""
sets the aspect ratio of the current zoom level of the imshow image
"""
    if axes == "gca": axes = _pylab.gca()  # use '==' (not 'is') for string comparison
# make sure it's not in "auto" mode
if type(axes.get_aspect()) == str: axes.set_aspect(1.0)
_pylab.draw() # this makes sure the window_extent is okay
axes.set_aspect(aspect*axes.get_aspect()*axes.get_window_extent().width/axes.get_window_extent().height)
_pylab.draw()
def image_set_extent(x=None, y=None, axes="gca"):
"""
Set's the first image's extent, then redraws.
Examples:
x = [1,4]
y = [33.3, 22]
"""
if axes == "gca": axes = _pylab.gca()
# get the current plot limits
xlim = axes.get_xlim()
ylim = axes.get_ylim()
# get the old extent
extent = axes.images[0].get_extent()
# calculate the fractional extents
x0 = extent[0]
y0 = extent[2]
xwidth = extent[1]-x0
ywidth = extent[3]-y0
frac_x1 = (xlim[0]-x0)/xwidth
frac_x2 = (xlim[1]-x0)/xwidth
frac_y1 = (ylim[0]-y0)/ywidth
frac_y2 = (ylim[1]-y0)/ywidth
# set the new
if not x == None:
extent[0] = x[0]
extent[1] = x[1]
if not y == None:
extent[2] = y[0]
extent[3] = y[1]
# get the new zoom window
x0 = extent[0]
y0 = extent[2]
xwidth = extent[1]-x0
ywidth = extent[3]-y0
x1 = x0 + xwidth*frac_x1
x2 = x0 + xwidth*frac_x2
y1 = y0 + ywidth*frac_y1
y2 = y0 + ywidth*frac_y2
# set the extent
axes.images[0].set_extent(extent)
# rezoom us
axes.set_xlim(x1,x2)
axes.set_ylim(y1,y2)
# draw
image_set_aspect(1.0)
def image_scale(xscale=1.0, yscale=1.0, axes="gca"):
"""
Scales the image extent.
"""
if axes == "gca": axes = _pylab.gca()
e = axes.images[0].get_extent()
x1 = e[0]*xscale
x2 = e[1]*xscale
y1 = e[2]*yscale
y2 = e[3]*yscale
image_set_extent([x1,x2],[y1,y2], axes)
def image_click_xshift(axes = "gca"):
"""
Takes a starting and ending point, then shifts the image y by this amount
"""
if axes == "gca": axes = _pylab.gca()
try:
p1 = _pylab.ginput()
p2 = _pylab.ginput()
xshift = p2[0][0]-p1[0][0]
e = axes.images[0].get_extent()
e[0] = e[0] + xshift
e[1] = e[1] + xshift
axes.images[0].set_extent(e)
_pylab.draw()
except:
print "whoops"
def image_click_yshift(axes = "gca"):
"""
Takes a starting and ending point, then shifts the image y by this amount
"""
if axes == "gca": axes = _pylab.gca()
try:
p1 = _pylab.ginput()
p2 = _pylab.ginput()
yshift = p2[0][1]-p1[0][1]
e = axes.images[0].get_extent()
e[2] = e[2] + yshift
e[3] = e[3] + yshift
axes.images[0].set_extent(e)
_pylab.draw()
except:
print "whoops"
def image_shift(xshift=0, yshift=0, axes="gca"):
"""
This will shift an image to a new location on x and y.
"""
if axes=="gca": axes = _pylab.gca()
e = axes.images[0].get_extent()
e[0] = e[0] + xshift
e[1] = e[1] + xshift
e[2] = e[2] + yshift
e[3] = e[3] + yshift
axes.images[0].set_extent(e)
_pylab.draw()
def image_set_clim(zmin=None, zmax=None, axes="gca"):
"""
This will set the clim (range) of the colorbar.
Setting zmin or zmax to None will not change them.
Setting zmin or zmax to "auto" will auto-scale them to include all the data.
"""
if axes=="gca": axes=_pylab.gca()
image = axes.images[0]
if zmin=='auto': zmin = _n.min(image.get_array())
if zmax=='auto': zmax = _n.max(image.get_array())
if zmin==None: zmin = image.get_clim()[0]
if zmax==None: zmax = image.get_clim()[1]
image.set_clim(zmin, zmax)
_pylab.draw()
def image_sliders(image="top", colormap="_last"):
return "NO!"
def image_ubertidy(figure="gcf", aspect=1.0, fontsize=18, fontweight='bold', fontname='Arial', ylabel_pad=0.007, xlabel_pad=0.010, colorlabel_pad=0.1, borderwidth=3.0, tickwidth=2.0, window_size=(550,500)):
if figure=="gcf": figure = _pylab.gcf()
# do this to both axes
for a in figure.axes:
_pylab.axes(a)
# remove the labels
a.set_title("")
a.set_xlabel("")
a.set_ylabel("")
# thicken the border
# we want thick axis lines
a.spines['top'].set_linewidth(borderwidth)
a.spines['left'].set_linewidth(borderwidth)
a.spines['bottom'].set_linewidth(borderwidth)
a.spines['right'].set_linewidth(borderwidth)
a.set_frame_on(True) # adds a thick border to the colorbar
# these two cover the main plot
_pylab.xticks(fontsize=fontsize, fontweight=fontweight, fontname=fontname)
_pylab.yticks(fontsize=fontsize, fontweight=fontweight, fontname=fontname)
# thicken the tick lines
for l in a.get_xticklines(): l.set_markeredgewidth(tickwidth)
for l in a.get_yticklines(): l.set_markeredgewidth(tickwidth)
# set the aspect and window size
_pylab.axes(figure.axes[0])
image_set_aspect(aspect)
get_figure_window().SetSize(window_size)
# we want to give the labels some breathing room (1% of the data range)
for label in _pylab.xticks()[1]: label.set_y(-xlabel_pad)
for label in _pylab.yticks()[1]: label.set_x(-ylabel_pad)
# need to draw to commit the changes up to this point. Annoying.
_pylab.draw()
# get the bounds of the first axes and come up with corresponding bounds
# for the colorbar
a1 = _pylab.gca()
b = a1.get_position()
aspect = figure.axes[1].get_aspect()
pos = []
pos.append(b.x0+b.width+0.02) # lower left x
pos.append(b.y0) # lower left y
pos.append(b.height/aspect) # width
pos.append(b.height) # height
# switch to the colorbar axes
_pylab.axes(figure.axes[1])
_pylab.gca().set_position(pos)
for label in _pylab.yticks()[1]: label.set_x(1+colorlabel_pad)
# switch back to the main axes
_pylab.axes(figure.axes[0])
_pylab.draw()
def integrate_shown_data(scale=1, fyname=1, autozero=0, **kwargs):
"""
Numerically integrates the data visible on the current/specified axes using
scale*fun.integrate_data(x,y). Modifies the visible data using
manipulate_shown_data(**kwargs)
autozero is the number of data points used to estimate the background
for subtraction. If autozero = 0, no background subtraction is performed.
"""
def I(x,y):
xout, iout = _fun.integrate_data(x, y, autozero=autozero)
print "Total =", scale*iout[-1]
return xout, scale*iout
if fyname==1: fyname = "$"+str(scale)+"\\times \\int dx$"
manipulate_shown_data(I, fxname=None, fyname=fyname, **kwargs)
def is_a_number(s):
try: eval(s); return 1
except: return 0
def manipulate_shown_data(f, input_axes="gca", output_axes=None, fxname=1, fyname=1, clear=1, pause=False, **kwargs):
"""
Loops over the visible data on the specified axes and modifies it based on
the function f(xdata, ydata), which must return new_xdata, new_ydata
input_axes which axes to pull the data from
output_axes which axes to dump the manipulated data (None for new figure)
fxname the name of the function on x
fyname the name of the function on y
1 means "use f.__name__"
0 or None means no change.
otherwise specify a string
**kwargs are sent to axes.plot
"""
# get the axes
if input_axes == "gca": a1 = _pylab.gca()
else: a1 = input_axes
# get the xlimits
xmin, xmax = a1.get_xlim()
# get the name to stick on the x and y labels
if fxname==1: fxname = f.__name__
if fyname==1: fyname = f.__name__
# get the output axes
if output_axes == None:
_pylab.figure(a1.figure.number+1)
a2 = _pylab.axes()
else:
a2 = output_axes
if clear: a2.clear()
# loop over the data
for line in a1.get_lines():
# if it's a line, do the manipulation
if isinstance(line, _mpl.lines.Line2D):
# get the data
x, y = line.get_data()
# trim the data according to the current zoom level
x, y = _fun.trim_data(xmin, xmax, x, y)
# do the manipulation
new_x, new_y = f(x,y)
# plot the new
_s.plot.xy.data(new_x, new_y, clear=0, label=line.get_label().replace("_", "-"), axes=a2, **kwargs)
# pause after each curve if we're supposed to
if pause:
_pylab.draw()
raw_input("<enter> ")
# set the labels and title.
if fxname in [0,None]: a2.set_xlabel(a1.get_xlabel())
else: a2.set_xlabel(fxname+"("+a1.get_xlabel()+")")
if fyname in [0,None]: a2.set_ylabel(a1.get_ylabel())
else: a2.set_ylabel(fyname+"("+a1.get_ylabel()+")")
_pylab.draw()
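# Hedged usage sketch (illustrative only, not from the original source):
# assuming curves are plotted on the current axes, a transformed copy of each
# visible curve can be sent to a new figure. "square" is a hypothetical
# transform defined here just for the example.
#
#   def square(x, y): return x, y**2
#   manipulate_shown_data(square, fyname="$y^2$")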
def manipulate_shown_xdata(fx, fxname=1, **kwargs):
"""
This defines a function f(xdata,ydata) returning fx(xdata), ydata and
runs manipulate_shown_data() with **kwargs sent to this. See
manipulate_shown_data() for more info.
"""
def f(x,y): return fx(x), y
f.__name__ = fx.__name__
manipulate_shown_data(f, fxname=fxname, fyname=None, **kwargs)
def manipulate_shown_ydata(fy, fyname=1, **kwargs):
"""
This defines a function f(xdata,ydata) returning xdata, fy(ydata) and
runs manipulate_shown_data() with **kwargs sent to this. See
manipulate_shown_data() for more info.
"""
def f(x,y): return x, fy(y)
f.__name__ = fy.__name__
manipulate_shown_data(f, fxname=None, fyname=fyname, **kwargs)
def _print_figures(figures, arguments='', file_format='pdf', target_width=8.5, target_height=11.0, target_pad=0.5):
"""
figure printing loop designed to be launched in a separate thread.
"""
for fig in figures:
# get the temp path
temp_path = _os.path.join(_settings.path_home, "temp")
# make the temp folder
_settings.MakeDir(temp_path)
# output the figure to postscript
path = _os.path.join(temp_path, "graph."+file_format)
# get the dimensions of the figure in inches
w=fig.get_figwidth()
h=fig.get_figheight()
        # we're printing to 8.5 x 11, so aim for 7.5 x 10
        # (use new variables so the target dimensions are not shrunk again on
        #  every pass through the loop when printing several figures)
        printable_height = target_height-2*target_pad
        printable_width  = target_width -2*target_pad
        # depending on the aspect we scale by the vertical or horizontal value
        if 1.0*h/w > printable_height/printable_width:
            # scale down according to the vertical dimension
            new_h = printable_height
            new_w = w*printable_height/h
        else:
            # scale down according to the horizontal dimension
            new_w = printable_width
            new_h = h*printable_width/w
        fig.set_figwidth(new_w)
        fig.set_figheight(new_h)
        # save it
        fig.savefig(path, bbox_inches=_pylab.matplotlib.transforms.Bbox(
            [[-target_pad, new_h-printable_height-target_pad],
             [printable_width-target_pad, printable_height-target_pad]]))
# set it back
fig.set_figheight(h)
fig.set_figwidth(w)
if not arguments == '':
c = _settings['instaprint'] + ' ' + arguments + ' "' + path + '"'
else:
c = _settings['instaprint'] + ' "' + path + '"'
print c
_os.system(c)
def instaprint(figure='gcf', arguments='', threaded=False, file_format='pdf'):
"""
Quick function that saves the specified figure as a postscript and then
calls the command defined by spinmob.prefs['instaprint'] with this
postscript file as the argument.
figure='gcf' can be 'all', a number, or a list of numbers
"""
global _settings
if not _settings.has_key('instaprint'):
print "No print command setup. Set the user variable settings['instaprint']."
return
if figure=='gcf': figure=[_pylab.gcf().number]
elif figure=='all': figure=_pylab.get_fignums()
if not getattr(figure,'__iter__',False): figure = [figure]
print "figure numbers in queue:", figure
figures=[]
for n in figure: figures.append(_pylab.figure(n))
# now run the ps printing command
if threaded:
# store the canvas type of the last figure
canvas_type = type(figures[-1].canvas)
# launch the aforementioned function as a separate thread
_thread.start_new_thread(_print_figures, (figures,arguments,file_format,))
# wait until the thread is running
_time.sleep(0.25)
# wait until the canvas type has returned to normal
t0 = _time.time()
while not canvas_type == type(figures[-1].canvas) and _time.time()-t0 < 5.0:
_time.sleep(0.1)
if _time.time()-t0 >= 5.0:
print "WARNING: Timed out waiting for canvas to return to original state!"
# bring back the figure and command line
_pylab.draw()
else:
_print_figures(figures, arguments, file_format)
_pylab.draw()
def shift(xshift=0, yshift=0, progressive=0, axes="gca"):
"""
This function adds an artificial offset to the lines.
yshift amount to shift vertically
xshift amount to shift horizontally
axes="gca" axes to do this on, "gca" means "get current axes"
progressive=0 progressive means each line gets more offset
set to 0 to shift EVERYTHING
"""
if axes=="gca": axes = _pylab.gca()
# get the lines from the plot
lines = axes.get_lines()
# loop over the lines and trim the data
for m in range(0,len(lines)):
if isinstance(lines[m], _mpl.lines.Line2D):
# get the actual data values
xdata = _n.array(lines[m].get_xdata())
ydata = _n.array(lines[m].get_ydata())
# add the offset
if progressive:
xdata += m*xshift
ydata += m*yshift
else:
xdata += xshift
ydata += yshift
# update the data for this line
lines[m].set_data(xdata, ydata)
# zoom to surround the data properly
auto_zoom()
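# Hedged usage sketch (illustrative only): offset each curve on the current
# axes by a successively larger vertical amount, e.g. for a waterfall plot.
#
#   shift(yshift=10, progressive=1)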
def raise_figure_window(f=0):
"""
Raises the supplied figure number or figure window.
"""
if _fun.is_a_number(f): f = _pylab.figure(f)
f.canvas.manager.window.raise_()
def reverse_draw_order(axes="current"):
"""
This function takes the graph and reverses the draw order.
"""
if axes=="current": axes = _pylab.gca()
# get the lines from the plot
lines = axes.get_lines()
# reverse the order
lines.reverse()
for n in range(0, len(lines)):
if isinstance(lines[n], _mpl.lines.Line2D):
axes.lines[n]=lines[n]
_pylab.draw()
def scale_x(scale, axes="current"):
"""
This function scales lines horizontally.
"""
if axes=="current": axes = _pylab.gca()
# get the lines from the plot
lines = axes.get_lines()
# loop over the lines and trim the data
for line in lines:
if isinstance(line, _mpl.lines.Line2D):
line.set_xdata(_pylab.array(line.get_xdata())*scale)
# update the title
title = axes.title.get_text()
title += ", x_scale="+str(scale)
axes.title.set_text(title)
# zoom to surround the data properly
auto_zoom()
def scale_y(scale, axes="current", lines="all"):
    """
    This function scales lines vertically.
    You can specify a line index, such as lines=0 or lines=[1,2,4]
    """
    if axes=="current": axes = _pylab.gca()
    # get the lines from the plot and select the requested subset
    all_lines = axes.get_lines()
    if lines == "all":            lines = all_lines
    elif _fun.is_iterable(lines): lines = [all_lines[n] for n in lines]
    else:                         lines = [all_lines[lines]]
    # loop over the selected lines and scale the data
    for line in lines:
        if isinstance(line, _mpl.lines.Line2D):
            line.set_ydata(_pylab.array(line.get_ydata())*scale)
# update the title
title = axes.title.get_text()
if not title == "":
title += ", y_scale="+str(scale)
axes.title.set_text(title)
# zoom to surround the data properly
auto_zoom()
def scale_y_universal(average=[1,10], axes="current"):
"""
This function scales lines vertically.
average=[1,10] indices of average universal point
"""
if axes=="current": axes = _pylab.gca()
# get the lines from the plot
lines = axes.get_lines()
# loop over the lines and trim the data
for m in range(0,len(lines)):
if isinstance(lines[m], _mpl.lines.Line2D):
# get the actual data values
xdata = lines[m].get_xdata()
ydata = lines[m].get_ydata()
# figure out the scaling factor
s=0
for n in range(average[0], average[1]+1): s += ydata[n]
scale = 1.0*s/(average[1]-average[0]+1.0)
# loop over the ydata to scale it
for n in range(0,len(ydata)): ydata[n] = ydata[n]/scale
# update the data for this line
lines[m].set_data(xdata, ydata)
# update the title
title = axes.title.get_text()
title += ", universal scale"
axes.title.set_text(title)
# zoom to surround the data properly
auto_zoom()
def set_title(axes="current", title=""):
if axes=="current": axes = _pylab.gca()
axes.title.set_text(title)
_pylab.draw()
def set_figure_window_geometry(fig='gcf', position=None, size=None):
"""
This will currently only work for Qt4Agg and WXAgg backends.
    position = [x, y]
size = [width, height]
fig can be 'gcf', a number, or a figure object.
"""
if type(fig)==str: fig = _pylab.gcf()
elif _fun.is_a_number(fig): fig = _pylab.figure(fig)
# Qt4Agg backend. Probably would work for other Qt stuff
if _pylab.get_backend().find('Qt') >= 0:
w = fig.canvas.window()
if not size == None:
w.resize(size[0],size[1])
if not position == None:
w.move(position[0], position[1])
    # WXAgg backend. Probably would work for other WX stuff.
elif _pylab.get_backend().find('WX') >= 0:
w = fig.canvas.Parent
if not size == None:
w.SetSize(size)
if not position == None:
w.SetPosition(position)
def set_xrange(xmin="same", xmax="same", axes="gca"):
if axes == "gca": axes = _pylab.gca()
xlim = axes.get_xlim()
if xmin == "same": xmin = xlim[0]
if xmax == "same": xmax = xlim[1]
axes.set_xlim(xmin,xmax)
_pylab.draw()
def set_yrange(ymin="same", ymax="same", axes="gca"):
if axes == "gca": axes = _pylab.gca()
ylim = axes.get_ylim()
if ymin == "same": ymin = ylim[0]
if ymax == "same": ymax = ylim[1]
axes.set_ylim(ymin,ymax)
_pylab.draw()
def set_yticks(start, step, axes="gca"):
"""
This will generate a tick array and apply said array to the axis
"""
if axes=="gca": axes = _pylab.gca()
# first get one of the tick label locations
xposition = axes.yaxis.get_ticklabels()[0].get_position()[0]
# get the bounds
ymin, ymax = axes.get_ylim()
# get the starting tick
nstart = int(_pylab.floor((ymin-start)/step))
nstop = int(_pylab.ceil((ymax-start)/step))
ticks = []
for n in range(nstart,nstop+1): ticks.append(start+n*step)
axes.set_yticks(ticks)
# set the x-position
for t in axes.yaxis.get_ticklabels():
x, y = t.get_position()
t.set_position((xposition, y))
_pylab.draw()
def set_xticks(start, step, axes="gca"):
"""
This will generate a tick array and apply said array to the axis
"""
if axes=="gca": axes = _pylab.gca()
# first get one of the tick label locations
yposition = axes.xaxis.get_ticklabels()[0].get_position()[1]
# get the bounds
xmin, xmax = axes.get_xlim()
# get the starting tick
nstart = int(_pylab.floor((xmin-start)/step))
nstop = int(_pylab.ceil((xmax-start)/step))
ticks = []
for n in range(nstart,nstop+1): ticks.append(start+n*step)
axes.set_xticks(ticks)
# set the y-position
for t in axes.xaxis.get_ticklabels():
x, y = t.get_position()
t.set_position((x, yposition))
_pylab.draw()
def invert(axes="current"):
"""
inverts the plot
"""
if axes=="current": axes = _pylab.gca()
scale_y(-1,axes)
def set_markers(marker="o", axes="current"):
if axes == "current": axes = _pylab.gca()
set_all_line_attributes("marker", marker, axes)
def set_all_line_attributes(attribute="lw", value=2, axes="current", refresh=True):
"""
This function sets all the specified line attributes.
"""
if axes=="current": axes = _pylab.gca()
# get the lines from the plot
lines = axes.get_lines()
# loop over the lines and trim the data
for line in lines:
if isinstance(line, _mpl.lines.Line2D):
_pylab.setp(line, attribute, value)
# update the plot
if refresh: _pylab.draw()
def set_line_attribute(line=-1, attribute="lw", value=2, axes="current", refresh=True):
    """
    This function sets the specified attribute of one line (by index; the
    default -1 means the most recently added line).
    """
    if axes=="current": axes = _pylab.gca()
    # get the requested line from the plot
    line = axes.get_lines()[line]
    _pylab.setp(line, attribute, value)
    # update the plot
    if refresh: _pylab.draw()
def smooth_line(line, smoothing=1, trim=True, draw=True):
"""
This takes a line instance and smooths its data with nearest neighbor averaging.
"""
# get the actual data values
xdata = list(line.get_xdata())
ydata = list(line.get_ydata())
_fun.smooth_array(ydata, smoothing)
if trim:
for n in range(0, smoothing):
xdata.pop(0); xdata.pop(-1)
ydata.pop(0); ydata.pop(-1)
# don't do anything if we don't have any data left
if len(ydata) == 0:
print "There's nothing left in "+str(line)+"!"
else:
# otherwise set the data with the new arrays
line.set_data(xdata, ydata)
# we refresh in real time for giggles
if draw: _pylab.draw()
def coarsen_line(line, coarsen=1, draw=True):
    """
    This takes a line instance and coarsens its data by averaging neighboring
    points together.
    """
# get the actual data values
xdata = line.get_xdata()
ydata = line.get_ydata()
xdata = _fun.coarsen_array(xdata, coarsen)
ydata = _fun.coarsen_array(ydata, coarsen)
# don't do anything if we don't have any data left
if len(ydata) == 0: print "There's nothing left in "+str(line)+"!"
# otherwise set the data with the new arrays
else: line.set_data(xdata, ydata)
# we refresh in real time for giggles
if draw: _pylab.draw()
def smooth_selected_trace(trim=True, axes="gca"):
"""
This cycles through all the lines in a set of axes, highlighting them,
and asking for how much you want to smooth by (0 or <enter> is valid)
"""
if axes=="gca": axes = _pylab.gca()
# get all the lines
lines = axes.get_lines()
for line in lines:
if isinstance(line, _mpl.lines.Line2D):
# first highlight it
fatten_line(line)
# get the smoothing factor
ready = 0
while not ready:
response = raw_input("Smoothing Factor (<enter> to skip): ")
try:
int(response)
ready=1
except:
                    # raw_input() strips the newline, so an empty response means "skip"
                    if response=="": ready = 1
                    else: print "No!"
            if not response == "":
smooth_line(line, int(response), trim)
# return the line to normal
unfatten_line(line)
def smooth_all_traces(smoothing=1, trim=True, axes="gca"):
"""
This function does nearest-neighbor smoothing of the data
"""
if axes=="gca": axes=_pylab.gca()
# get the lines from the plot
lines = axes.get_lines()
# loop over the lines and trim the data
for line in lines:
if isinstance(line, _mpl.lines.Line2D):
smooth_line(line, smoothing, trim, draw=False)
_pylab.draw()
def coarsen_all_traces(coarsen=1, axes="all", figure=None):
    """
    This function coarsens the data of all traces by averaging neighboring
    points together.
    """
    if axes=="gca": axes=_pylab.gca()
    if axes=="all":
        if not figure: figure = _pylab.gcf()
        axes = figure.axes
if not _fun.is_iterable(axes): axes = [axes]
for a in axes:
# get the lines from the plot
lines = a.get_lines()
# loop over the lines and trim the data
for line in lines:
if isinstance(line, _mpl.lines.Line2D):
coarsen_line(line, coarsen, draw=False)
_pylab.draw()
def line_math(fx=None, fy=None, axes='gca'):
"""
applies function fx to all xdata and fy to all ydata.
"""
if axes=='gca': axes = _pylab.gca()
lines = axes.get_lines()
for line in lines:
if isinstance(line, _mpl.lines.Line2D):
xdata, ydata = line.get_data()
if not fx==None: xdata = fx(xdata)
if not fy==None: ydata = fy(ydata)
line.set_data(xdata,ydata)
_pylab.draw()
def trim(xmin="auto", xmax="auto", ymin="auto", ymax="auto", axes="current"):
"""
This function just removes all data from the plots that
is outside of the [xmin,xmax,ymin,ymax] range.
"auto" means "determine from the current axes's range"
"""
if axes=="current": axes = _pylab.gca()
# if trim_visible is true, use the current plot's limits
if xmin=="auto": (xmin, dummy) = axes.get_xlim()
if xmax=="auto": (dummy, xmax) = axes.get_xlim()
if ymin=="auto": (ymin, dummy) = axes.get_ylim()
if ymax=="auto": (dummy, ymax) = axes.get_ylim()
# get the lines from the plot
lines = axes.get_lines()
# loop over the lines and trim the data
for line in lines:
# get the actual data values
old_xdata = line.get_xdata()
old_ydata = line.get_ydata()
# loop over the xdata and trim if it's outside the range
new_xdata = []
new_ydata = []
for n in range(0, len(old_xdata)):
# if it's in the data range
if old_xdata[n] >= xmin and old_xdata[n] <= xmax \
and old_ydata[n] >= ymin and old_ydata[n] <= ymax:
# append it to the new x and y data set
new_xdata.append(old_xdata[n])
new_ydata.append(old_ydata[n])
# don't do anything if we don't have any data left
if len(new_xdata) == 0:
print "There's nothing left in "+str(line)+"!"
else:
# otherwise set the data with the new arrays
line.set_data(new_xdata, new_ydata)
# loop over the collections, where the vertical parts of the error bars are stored
for c in axes.collections:
# loop over the paths and pop them if they're bad
for n in range(len(c._paths)-1,-1,-1):
# loop over the vertices
naughty = False
for v in c._paths[n].vertices:
# if the path contains any vertices outside the trim box, kill it!
if v[0] < xmin or v[0] > xmax or v[1] < ymin or v[1] > ymax:
naughty=True
# BOOM
if naughty: c._paths.pop(n)
# zoom to surround the data properly
auto_zoom()
def xscale(scale='log'):
_pylab.xscale(scale)
_pylab.draw()
def yscale(scale='log'):
_pylab.yscale(scale)
_pylab.draw()
def ubertidy(figure="gcf", zoom=True, width=None, height=None, fontsize=12, fontweight='normal', fontname='Arial',
borderwidth=1.2, tickwidth=1, ticks_point="in", xlabel_pad=0.010, ylabel_pad=0.008, window_size=[550,400]):
"""
This guy performs the ubertidy from the helper on the first window.
Currently assumes there is only one set of axes in the window!
"""
if figure=="gcf": f = _pylab.gcf()
else: f = figure
set_figure_window_geometry(fig=f, size=window_size)
for n in range(len(f.axes)):
# get the axes
a = f.axes[n]
# set the current axes
_pylab.axes(a)
# we want thick axis lines
a.spines['top'].set_linewidth(borderwidth)
a.spines['left'].set_linewidth(borderwidth)
a.spines['bottom'].set_linewidth(borderwidth)
a.spines['right'].set_linewidth(borderwidth)
# get the tick lines in one big list
xticklines = a.get_xticklines()
yticklines = a.get_yticklines()
# set their marker edge width
_pylab.setp(xticklines+yticklines, mew=tickwidth)
# set what kind of tickline they are (outside axes)
if ticks_point=="out":
for l in xticklines: l.set_marker(_mpl.lines.TICKDOWN)
for l in yticklines: l.set_marker(_mpl.lines.TICKLEFT)
# get rid of the top and right ticks
a.xaxis.tick_bottom()
a.yaxis.tick_left()
# we want bold fonts
_pylab.xticks(fontsize=fontsize, fontweight=fontweight, fontname=fontname)
_pylab.yticks(fontsize=fontsize, fontweight=fontweight, fontname=fontname)
# we want to give the labels some breathing room (1% of the data range)
for label in _pylab.xticks()[1]: label.set_y(-xlabel_pad)
for label in _pylab.yticks()[1]: label.set_x(-ylabel_pad)
# get rid of tick label offsets
#a.ticklabel_format(style='plain')
# set the position/size of the axis in the window
p = a.get_position().bounds
if width: a.set_position([0.15,p[1],0.15+width*0.5,p[3]])
p = a.get_position().bounds
if height: a.set_position([p[0],0.17,p[2],0.17+height*0.5])
# set the axis labels to empty (so we can add them with a drawing program)
a.set_title('')
a.set_xlabel('')
a.set_ylabel('')
# kill the legend
a.legend_ = None
# zoom!
if zoom: auto_zoom(axes=a)
def make_inset(figure="current", width=1, height=1):
"""
This guy makes the figure thick and small, like an inset.
Currently assumes there is only one set of axes in the window!
"""
# get the current figure if we're not supplied with one
if figure == "current": figure = _pylab.gcf()
# get the window
w = figure.canvas.GetParent()
# first set the size of the window
w.SetSize([220,300])
# we want thick axis lines
figure.axes[0].get_frame().set_linewidth(3.0)
# get the tick lines in one big list
xticklines = figure.axes[0].get_xticklines()
yticklines = figure.axes[0].get_yticklines()
# set their marker edge width
_pylab.setp(xticklines+yticklines, mew=2.0)
# set what kind of tickline they are (outside axes)
for l in xticklines: l.set_marker(_mpl.lines.TICKDOWN)
for l in yticklines: l.set_marker(_mpl.lines.TICKLEFT)
# get rid of the top and right ticks
figure.axes[0].xaxis.tick_bottom()
figure.axes[0].yaxis.tick_left()
# we want bold fonts
_pylab.xticks(fontsize=20, fontweight='bold', fontname='Arial')
_pylab.yticks(fontsize=20, fontweight='bold', fontname='Arial')
    # remove the tick labels (they can be added back with a drawing program)
figure.axes[0].xaxis.set_ticklabels([])
figure.axes[0].yaxis.set_ticklabels([])
# set the position/size of the axis in the window
figure.axes[0].set_position([0.1,0.1,0.1+0.7*width,0.1+0.7*height])
# set the axis labels to empty (so we can add them with a drawing program)
figure.axes[0].set_title('')
figure.axes[0].set_xlabel('')
figure.axes[0].set_ylabel('')
# set the position of the legend far away
figure.axes[0].legend=None
# zoom!
auto_zoom(figure.axes[0], 0.07, 0.07)
def export_figure(dpi=200, figure="gcf", path="ask"):
"""
Saves the actual postscript data for the figure.
"""
if figure=="gcf": figure = _pylab.gcf()
if path=="ask": path = _s.dialogs.Save("*.*", default_directory="save_plot_default_directory")
if path=="":
print "aborted."
return
figure.savefig(path, dpi=dpi)
def save_plot(axes="gca", path="ask"):
"""
Saves the figure in my own ascii format
"""
global line_attributes
# choose a path to save to
if path=="ask": path = _s.dialogs.Save("*.plot", default_directory="save_plot_default_directory")
if path=="":
print "aborted."
return
if not path.split(".")[-1] == "plot": path = path+".plot"
f = file(path, "w")
# if no argument was given, get the current axes
if axes=="gca": axes=_pylab.gca()
# now loop over the available lines
f.write("title=" +axes.title.get_text().replace('\n', '\\n')+'\n')
f.write("xlabel="+axes.xaxis.label.get_text().replace('\n','\\n')+'\n')
f.write("ylabel="+axes.yaxis.label.get_text().replace('\n','\\n')+'\n')
for l in axes.lines:
# write the data header
f.write("trace=new\n")
f.write("legend="+l.get_label().replace('\n', '\\n')+"\n")
for a in line_attributes: f.write(a+"="+str(_pylab.getp(l, a)).replace('\n','')+"\n")
# get the data
x = l.get_xdata()
y = l.get_ydata()
# loop over the data
for n in range(0, len(x)): f.write(str(float(x[n])) + " " + str(float(y[n])) + "\n")
f.close()
def save_figure_raw_data(figure="gcf", **kwargs):
"""
This will just output an ascii file for each of the traces in the shown figure.
**kwargs are sent to dialogs.Save()
"""
# choose a path to save to
path = _s.dialogs.Save(**kwargs)
if path=="": return "aborted."
# if no argument was given, get the current axes
if figure=="gcf": figure = _pylab.gcf()
for n in range(len(figure.axes)):
a = figure.axes[n]
for m in range(len(a.lines)):
l = a.lines[m]
x = l.get_xdata()
y = l.get_ydata()
p = _os.path.split(path)
p = _os.path.join(p[0], "axes" + str(n) + " line" + str(m) + " " + p[1])
print p
# loop over the data
f = open(p, 'w')
for j in range(0, len(x)):
f.write(str(x[j]) + "\t" + str(y[j]) + "\n")
f.close()
def load_plot(clear=1, offset=0, axes="gca"):
# choose a path to load the file from
path = _s.dialogs.SingleFile("*.*", default_directory="save_plot_default_directory")
if path=="": return
# read the file in
lines = _fun.read_lines(path)
# if no argument was given, get the current axes
if axes=="gca": axes=_pylab.gca()
# if we're supposed to, clear the plot
if clear:
axes.figure.clear()
_pylab.gca()
# split by space delimiter and see if the first element is a number
xdata = []
ydata = []
line_stuff = []
legend = []
title = 'reloaded plot with no title'
xlabel = 'x-data with no label'
ylabel = 'y-data with no label'
for line in lines:
s = line.strip().split('=')
if len(s) > 1: # header stuff
if s[0].strip() == 'title':
# set the title of the plot
title = ""
for n in range(1,len(s)): title += " "+s[n].replace('\\n', '\n')
elif s[0].strip() == 'xlabel':
# set the title of the plot
xlabel = ""
for n in range(1,len(s)): xlabel += " "+s[n].replace('\\n', '\n')
elif s[0].strip() == 'ylabel':
# set the title of the plot
ylabel = ""
for n in range(1,len(s)): ylabel += " "+s[n].replace('\\n', '\n')
elif s[0].strip() == 'legend':
l=""
for n in range(1,len(s)): l += " " + s[n].replace('\\n', '\n')
legend.append(l)
elif s[0].strip() == 'trace':
# if we're on a new plot
xdata.append([])
ydata.append([])
line_stuff.append({})
elif s[0].strip() in line_attributes:
line_stuff[-1][s[0].strip()] = s[1].strip()
else: # data
s = line.strip().split(' ')
try:
float(s[0])
float(s[1])
xdata[-1].append(float(s[0]))
ydata[-1].append(float(s[1])+offset)
except:
print "error s=" + str(s)
for n in range(0, len(xdata)):
axes.plot(xdata[n], ydata[n])
l = axes.get_lines()[-1]
l.set_label(legend[n])
for key in line_stuff[n]:
try: _pylab.setp(l, key, float(line_stuff[n][key]))
except: _pylab.setp(l, key, line_stuff[n][key])
axes.set_title(title)
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
format_figure(axes.figure)
def modify_legend(axes="gca"):
# get the axes
if axes=="gca": axes = _pylab.gca()
# get the lines
lines = axes.get_lines()
# loop over the lines
for line in lines:
if isinstance(line, _mpl.lines.Line2D):
# highlight the line
fatten_line(line)
# get the label (from the legend)
label = line.get_label()
print label
new_label = raw_input("New Label: ")
if new_label == "q" or new_label == "quit":
unfatten_line(line)
return
if not new_label == "\n": line.set_label(new_label)
unfatten_line(line)
format_figure()
def fatten_line(line, william_fatner=2.0):
size = line.get_markersize()
width = line.get_linewidth()
line.set_markersize(size*william_fatner)
line.set_linewidth(width*william_fatner)
_pylab.draw()
def unfatten_line(line, william_fatner=0.5):
fatten_line(line, william_fatner)
def legend(location='best', fontsize=16, axes="gca"):
if axes=="gca": axes = _pylab.gca()
axes.legend(loc=location, prop=_mpl.font_manager.FontProperties(size=fontsize))
_pylab.draw()
#
# Style cycle, available for use in plotting
#
class style_cycle:
def __init__(self, linestyles=['-'], markers=['s','^','o'], colors=['k','r','b','g','m'], line_colors=None, face_colors=None, edge_colors=None):
"""
Set up the line/marker rotation cycles.
linestyles, markers, and colors need to be lists, and you can override
using line_colors, and face_colors, and edge_colors (markeredgecolor) by
setting them to a list instead of None.
"""
# initial setup, assuming all the overrides are None
self.linestyles = linestyles
self.markers = markers
self.line_colors = colors
self.face_colors = colors
self.edge_colors = colors
# Apply the override colors
if not line_colors == None: self.line_colors = line_colors
if not face_colors == None: self.face_colors = face_colors
if not edge_colors == None: self.edge_colors = edge_colors
self.line_colors_index = 0
self.markers_index = 0
self.linestyles_index = 0
self.face_colors_index = 0
self.edge_colors_index = 0
# binding for the user to easily re-initialize
initialize = __init__
def reset(self):
self.line_colors_index = 0
self.markers_index = 0
self.linestyles_index = 0
self.face_colors_index = 0
self.edge_colors_index = 0
def get_line_color(self, increment=1):
"""
Returns the current color, then increments the color by what's specified
"""
i = self.line_colors_index
self.line_colors_index += increment
if self.line_colors_index >= len(self.line_colors):
self.line_colors_index = self.line_colors_index-len(self.line_colors)
if self.line_colors_index >= len(self.line_colors): self.line_colors_index=0 # to be safe
return self.line_colors[i]
def set_all_colors(self, colors=['k','k','r','r','b','b','g','g','m','m']):
self.line_colors=colors
self.face_colors=colors
self.edge_colors=colors
self.reset()
def get_marker(self, increment=1):
"""
Returns the current marker, then increments the marker by what's specified
"""
i = self.markers_index
self.markers_index += increment
if self.markers_index >= len(self.markers):
self.markers_index = self.markers_index-len(self.markers)
if self.markers_index >= len(self.markers): self.markers_index=0 # to be safe
return self.markers[i]
def set_markers(self, markers=['o']):
self.markers=markers
self.reset()
def get_linestyle(self, increment=1):
"""
        Returns the current linestyle, then increments it by what's specified
"""
i = self.linestyles_index
self.linestyles_index += increment
if self.linestyles_index >= len(self.linestyles):
self.linestyles_index = self.linestyles_index-len(self.linestyles)
if self.linestyles_index >= len(self.linestyles): self.linestyles_index=0 # to be safe
return self.linestyles[i]
def set_linestyles(self, linestyles=['-']):
self.linestyles=linestyles
self.reset()
def get_face_color(self, increment=1):
"""
        Returns the current face color, then increments it by what's specified
"""
i = self.face_colors_index
self.face_colors_index += increment
if self.face_colors_index >= len(self.face_colors):
self.face_colors_index = self.face_colors_index-len(self.face_colors)
if self.face_colors_index >= len(self.face_colors): self.face_colors_index=0 # to be safe
return self.face_colors[i]
def set_face_colors(self, colors=['k','none','r','none','b','none','g','none','m','none']):
self.face_colors=colors
self.reset()
def get_edge_color(self, increment=1):
"""
        Returns the current edge color, then increments it by what's specified
"""
i = self.edge_colors_index
self.edge_colors_index += increment
if self.edge_colors_index >= len(self.edge_colors):
self.edge_colors_index = self.edge_colors_index-len(self.edge_colors)
if self.edge_colors_index >= len(self.edge_colors): self.edge_colors_index=0 # to be safe
return self.edge_colors[i]
def set_edge_colors(self, colors=['k','none','r','none','b','none','g','none','m','none']):
self.edge_colors=colors
self.reset()
def apply(self, axes="gca"):
"""
Applies the style cycle to the lines in the axes specified
"""
if axes == "gca": axes = _pylab.gca()
self.reset()
lines = axes.get_lines()
for l in lines:
l.set_color(self.get_line_color(1))
l.set_mfc(self.get_face_color(1))
l.set_marker(self.get_marker(1))
l.set_mec(self.get_edge_color(1))
l.set_linestyle(self.get_linestyle(1))
_pylab.draw()
def __call__(self, increment=1):
return self.get_line_color(increment)
# this is the guy in charge of keeping track of the rotation of colors and symbols for plotting
style = style_cycle(colors = ['k','r','b','g','m'],
markers = ['o', '^', 's'],
linestyles = ['-'])
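# Hedged usage sketch (illustrative only, not from the original source): the
# module-level "style" instance above can be cycled manually or applied to an
# existing set of axes.
#
#   c = style.get_line_color()   # 'k' first, then 'r', 'b', 'g', 'm', ...
#   m = style.get_marker()       # 'o' first, then '^', 's', ...
#   style.apply()                # restyle every line on the current axes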
| gpl-3.0 |
billy-inn/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
ntbrewer/Pyspectr | other/pydamm_old.py | 2 | 22846 | #!/usr/bin/python3
"""
Krzysztof Miernik 2012
[email protected]
Damm-like analysis program for his/drr experiment spectra files.
"""
import numpy
import math
import matplotlib.pyplot as plt
from matplotlib import cm, ticker
import sys
sys.path.append('/home/krm/Documents/Programs/Python/Pyspectr')
from Pyspectr import hisfile as hisfile
from Pyspectr.decay_fitter import DecayFitter as DecayFitter
from Pyspectr.peak_fitter import PeakFitter as PeakFitter
class Experiment:
def __init__(self, file_name, size=11):
"""Initialize plot and open data file (file_name)"""
self.hisfile = None
if size == 1:
shape = (8, 6)
elif size == 11:
shape = (12, 8)
else:
shape = (12, 8)
if size != 0:
plt.figure(1, shape)
plt.xlabel('X')
plt.ylabel('Y')
plt.ion()
plt.show()
# Max bins in 2d histogram
self.MAX_2D_BIN = 256
self.file_name = file_name
self.load(file_name)
self.current = {'x' : None,
'y' : None,
'z' : None,
'id' : None}
self.peaks = []
def _replace_chars(self, text):
"""Clear text from characters that are not accepted by latex"""
replace_chars = [['_', '-'],
['$', '\$'],
['%', '\%'],
['~', ' '],
['"', "''"],
['\\', ' ']]
replaced_text = text
for r_ch in replace_chars:
replaced_text = replaced_text.replace(r_ch[0], r_ch[1])
return replaced_text
def load(self, file_name):
"""Load his file (also tar gzipped files)"""
self.hisfile = hisfile.HisFile(file_name)
def d(self, his_id, norm=1, clear=True, plot=True):
"""Plot histogram in current window/ax """
if self.hisfile is None:
print('Please load data file first')
return None
title = self.hisfile.histograms[his_id]['title']
data = self.hisfile.load_histogram(his_id)
if data[0] == 1:
if clear and plot:
self.clear()
label = '{}: {}'.format(his_id,
self.hisfile.histograms[his_id]['title'])
label = self._replace_chars(label)
if norm != 1:
label += 'x{: .3f}'.format(1 / norm)
if plot:
plt.plot(data[1], data[3] / norm, ls='steps-mid', label=label)
if self.current.get('xlim') is None:
self.current['xlim'] = plt.xlim()
else:
plt.xlim(self.current['xlim'])
plt.legend(loc=0, fontsize='small')
self.current['id'] = his_id
self.current['x'] = data[1]
self.current['y'] = data[3]/norm
self.current['z'] = None
return (data[1], data[3]/norm)
        else:
            print('{} is not a 1D histogram'.format(his_id))
def dd(self, his_id, rx=None, ry=None, logz=False, clear=True, plot=True):
"""Plot 2D histogram in current window/ax,
rx is x range, ry is y range
"""
if self.hisfile is None:
print('Please load data file first')
return None
title = '{}: {}'.format(his_id,
self.hisfile.histograms[his_id]['title'])
title = self._replace_chars(title)
data = self.hisfile.load_histogram(his_id)
if data[0] != 2:
print('{} is not a 2D histogram'.format(his_id))
else:
if clear and plot:
self.clear()
x = data[1]
y = data[2]
w = data[3]
if rx is not None:
x = x[rx[0]:rx[1]]
w = w[rx[0]:rx[1],:]
if ry is not None:
y = y[ry[0]:ry[1]]
w = w[:, ry[0]:ry[1]]
nx = len(x)
ny = len(y)
binx = 1
biny = 1
# First rebin data if too large
if nx > self.MAX_2D_BIN:
binx = math.ceil(nx / self.MAX_2D_BIN)
missing = binx * self.MAX_2D_BIN - nx
if missing > 0:
addx = numpy.arange(data[1][-1] + 1,
data[1][-1] + missing + 1)
x = numpy.concatenate((x, addx))
nx = len(x)
z = numpy.zeros((missing, ny))
w = numpy.concatenate((w, z), axis=0)
x = numpy.reshape(x, (-1, binx))
x = x.mean(axis=1)
if ny > self.MAX_2D_BIN:
biny = math.ceil(ny / self.MAX_2D_BIN)
missing = biny * self.MAX_2D_BIN - ny
if missing > 0:
addy = numpy.arange(data[2][-1] + 1,
data[2][-1] + missing + 1)
y = numpy.concatenate((y, addy))
z = numpy.zeros((nx, missing))
w = numpy.concatenate((w, z), axis=1)
y = numpy.reshape(y, (-1, biny))
y = y.mean(axis=1)
nx = len(x)
ny = len(y)
if nx != len(data[2]) or ny != len(data[1]):
w = numpy.reshape(w, (nx, binx, ny, biny)).mean(3).mean(1)
w = numpy.transpose(w)
if plot:
z = w
if logz:
z = numpy.ma.masked_where(w <= 0, numpy.log10(w))
title += ' (log10)'
plt.title(title)
CS = plt.pcolormesh(x, y, z,
cmap=cm.RdYlGn_r)
plt.xlim(rx)
plt.ylim(ry)
plt.colorbar()
self.current['id'] = his_id
self.current['x'] = x
self.current['y'] = y
self.current['z'] = w
return (x, y, w)
def gx(self, his_id, rx=None, ry=None, bg=None, norm=1,
clear=True, plot=True):
"""Make projection on Y axis of 2D histogram with gate
set on X (rx) and possibly on Y (ry)
"""
if self.hisfile is None:
print('Please load data file first')
return None
if rx is None or len(rx) != 2:
print('Please select gate on X in (min, max) format')
return None
if ry is not None and len(ry) != 2:
print('Please select gate on Y in (min, max) format')
return None
data = self.hisfile.load_histogram(his_id)
if data[0] != 2:
print('{} is not a 2D histogram'.format(his_id))
else:
if clear and plot:
self.clear()
x = data[1]
y = data[2]
w = data[3]
if ry is None:
ry = [0, len(y)-2]
y = y[ry[0]:ry[1]+1]
g = w[rx[0]:rx[1]+1, ry[0]:ry[1]+1].sum(axis=0)
if bg is not None:
if (bg[1] - bg[0]) != (rx[1] - rx[0]):
print('#Warning: background and gate of different widths')
g = g - w[bg[0]:bg[1]+1, ry[0]:ry[1]+1].sum(axis=0)
label = 'gx({},{}) {}: {}'.format(rx[0], rx[1], his_id,
self.hisfile.histograms[his_id]['title'])
label = self._replace_chars(label)
if norm == 'sum':
norm = g.sum()
if bg is not None:
label += ' bg ({}, {})'.format(bg[0], bg[1])
if plot:
plt.plot(y, g/norm, ls='steps-mid', label=label)
plt.legend(loc=0, fontsize='small')
if self.current.get('xlim') is None:
self.current['xlim'] = plt.xlim()
else:
plt.xlim(self.current['xlim'])
self.current['id'] = his_id
self.current['x'] = y
self.current['y'] = g/norm
self.current['z'] = None
return (y, g/norm)
def gy(self, his_id, ry=None, rx=None, bg=None, norm=1,
clear=True, plot=True):
"""Make projection on X axis of 2D histogram with gate
set on Y (ry) and possibly on X (rx), the bg gate selects a
background region to be subtracted from data
"""
if self.hisfile is None:
print('Please load data file first')
return None
if ry is None or len(ry) != 2:
print('Please select gate on Y in (min, max) format')
return None
if rx is not None and len(rx) != 2:
print('Please select gate on X in (min, max) format')
return None
data = self.hisfile.load_histogram(his_id)
if data[0] != 2:
print('{} is not a 2D histogram'.format(his_id))
else:
            if clear and plot:
self.clear()
x = data[1]
y = data[2]
w = data[3]
if rx is None:
rx = [0, len(x)-2]
x = x[rx[0]:rx[1]+1]
g = w[rx[0]:rx[1]+1, ry[0]:ry[1]+1].sum(axis=1)
if bg is not None:
if (bg[1] - bg[0]) != (ry[1] - ry[0]):
print('#Warning: background and gate of different widths')
g = g - w[rx[0]:rx[1]+1, bg[0]:bg[1]+1].sum(axis=1)
label = 'gy({},{}) {}: {}'.format(ry[0], ry[1], his_id,
self.hisfile.histograms[his_id]['title'])
label = self._replace_chars(label)
if norm == 'sum':
norm = g.sum()
if bg is not None:
label += ' bg ({}, {})'.format(bg[0], bg[1])
if plot:
plt.plot(x, g/norm, ls='steps-mid', label=label)
plt.legend(loc=0, fontsize='small')
if self.current.get('xlim') is None:
self.current['xlim'] = plt.xlim()
else:
plt.xlim(self.current['xlim'])
self.current['id'] = his_id
self.current['x'] = x
self.current['y'] = g/norm
self.current['z'] = None
return (x, g/norm)
def clear(self):
"""Clear current plot"""
plt.clf()
plt.xlabel('X')
plt.ylabel('Y')
self.current['xlim'] = None
def dl(self, x0, x1):
"""Change xrange of 1D histogram"""
self.current['xlim'] = (x0, x1)
plt.xlim(x0, x1)
if self.current['y'] is not None:
plt.ylim(min(self.current['y'][x0:x1]),
max(self.current['y'][x0:x1]))
def dmm(self, y0, y1):
"""Change yrange of 1D histogram """
plt.ylim(y0, y1)
def log(self):
"""Change y scale to log"""
plt.yscale('log')
def lin(self):
"""Change y scale to linear"""
plt.yscale('linear')
def list(self, his_id=None):
"""List all histograms or details on selected histogram"""
if his_id is None:
for key in sorted(self.hisfile.histograms.keys()):
print('{: <6} {}'.format(key,
self.hisfile.histograms[key]['title']))
else:
try:
dim = self.hisfile.histograms[his_id]['dimension']
xmin = []
xmax = []
                for i in range(dim):
                    xmin.append(self.hisfile.histograms[his_id]['minc'][i])
                    xmax.append(self.hisfile.histograms[his_id]['maxc'][i])
print('{: <10} : {}'.format('ID', his_id))
print('{: <10} : {}'.format('Title',
self.hisfile.histograms[his_id]['title']))
print('{: <10} : {}'.format('Dimensions', dim))
print('{: <10} : ({}, {})'.format('X range', xmin[0], xmax[0]))
if dim > 1:
print('{: <10} : ({}, {})'.format('Y range',
xmin[1], xmax[1]))
except KeyError:
print('Histogram id = {} not found'.format(his_id))
def rebin(self, bin_size, clear=True, plot=True):
"""Re-bin the current histogram"""
if (self.current['x'] is not None and
self.current['y'] is not None):
x = self.rebin_histogram(self.current['x'], bin_size,
False, False)
y = self.rebin_histogram(self.current['y'], bin_size)
if plot:
xlim = plt.xlim()
if clear:
self.clear()
plt.plot(x, y, ls='steps-mid')
plt.xlim(xlim)
self.current['x'] = x
self.current['y'] = y
return (x, y)
def rebin_histogram(self, histogram, bin_size, add=True, zeros=True):
"""Bin histogram. If add is True, the bins are sum of bins,
otherwise the mean number of counts is used.
If zeros is true, in case the histogram must be extended
(len(histogram) % bin_size != 0) is extended with zeros,
otherwise an extrapolation of last two counts is used.
Example
y1 = binned(y1, bin1y)
x1 = binned(x1, bin1y, False, False)
"""
if len(histogram) % bin_size != 0:
if zeros:
addh = numpy.zeros((bin_size - len(histogram) % bin_size))
histogram = numpy.concatenate((histogram, addh))
else:
d = histogram[-1] - histogram[-2]
l = histogram[-1]
n = bin_size - len(histogram) % bin_size
addh = numpy.arange(l, l + n * d, d)
histogram = numpy.concatenate((histogram, addh))
if add:
return histogram.reshape((-1, bin_size)).sum(axis=1)
else:
return histogram.reshape((-1, bin_size)).mean(axis=1)
def mark(self, x_mark):
"""Put vertical line on plot to mark the peak (or guide the eye)"""
plt.axvline(x_mark, ls='--', c='black')
def set_efficiency_params(self, pars):
"""Sets efficiency calibration parameters, the efficiency is calculated
as
eff = exp(a0 + a1 * log(E) + a2 * log(E)**2 + ...)
"""
self.eff_pars = pars
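    # Hedged worked example (illustrative numbers, not from the original
    # source): with pars = [a0, a1, a2] the efficiency at energy E is
    #     eff(E) = exp(a0 + a1*log(E) + a2*log(E)**2)
    # e.g. pars = [-1.0, 0.5, -0.05] at E = 1000 keV gives log(E) ~ 6.908 and
    #     eff ~ exp(-1.0 + 3.454 - 2.386) = exp(0.068) ~ 1.07
    # Here "e" is an Experiment instance and 1001 a hypothetical histogram id:
    #
    #   e.set_efficiency_params([-1.0, 0.5, -0.05])
    #   e.apply_efficiency_calibration(his_id=1001)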
def apply_efficiency_calibration(self, his_id=None, clear=True, plot=True):
if his_id is not None:
data = self.hisfile.load_histogram(his_id)
if data[0] != 1:
print('{} is not a 1D histogram'.format(his_id))
return None
x_axis = data[1]
data_y = data[3]
else:
x_axis = self.current['x']
data_y = self.current['y']
# eff(E) = exp(a0 + a1 * log(E) + a2 * log(E)**2 + ...
# lx = log(E)
# s = a0 + a1 * lx + ...
# eff(E) = exp(s)
for i, x in enumerate(x_axis):
lx = math.log(x)
s = 0
for p, a in enumerate(self.eff_pars):
s += a * lx**p
data_y[i] = data_y[i] / math.exp(s)
self.current['y'] = data_y
if plot:
if clear:
self.clear()
plt.plot(x_axis, data_y, ls='steps-mid')
def gamma_gamma_spectra(self, gg_id, gate, clear=True):
""" Plots gamma-gamma gate broken into 4 subplots (0-600, 600-1200,
1200-2000, 2000-4000.
gg_id is 2D histogram id
gate is in form ((x1, y1), (x2, y2)) where i=1 is gate on line, i=2
is gate on background
"""
self.clear()
x, y = self.gy(gg_id, gate[0], bg=gate[1])
ranges = ((0, 600), (600, 1200), (1200, 2000), (2000, 4000))
for i, r in enumerate(ranges):
ax = plt.subplot(4, 1, i + 1)
ax.plot(x[r[0]:r[1]], y[r[0]:r[1]], ls='steps-mid')
ax.set_xlim(r)
ax.set_xlabel('E (keV)')
plt.tight_layout()
def annotate(self, x, text, shiftx=0, shifty=0):
""" Add arrow with line energy and possible short text"""
length = 0.07 * (plt.ylim()[1] - plt.ylim()[0])
y = self.current['y'][x]
plt.annotate(text, xy=(x, y),
xytext=(x + shiftx, y + length + shifty),
rotation=90.,
xycoords='data',
fontsize=9,
verticalalignment='bottom',
horizontalalignment='center',
arrowprops=dict(width=1, facecolor='black', headwidth=5,
shrink=0.1))
def load_gates(self, filename):
"""Load gamma gates from text file, the format is:
# Comment line
Name x0 x1 bg0 bg1
Example:
110 111 113 115 117
"""
gatefile = open(filename, 'r')
lineN = 0
gates = {}
for line in gatefile:
lineN += 1
line = line.strip()
if line.startswith('#'):
continue
items = line.split()
if len(items) < 5:
print('Warning: line {} bad data'.format(lineN))
continue
gates[int(items[0])] = ((int(items[1]), int(items[2])),
(int(items[3]), int(items[4])))
return gates
def gamma_time_profile(self, his_id, gate, t_bin=1, rt=None, clear=True):
"""Plots gamma time profile, gate should be given in format:
        ((x0, x1), (bg0, bg1))
the rt is gate in time in (t0, t1) format"""
xg, yg = self.gx(his_id, rx=gate[0], ry=rt, plot=False)
xb, yb = self.gx(his_id, rx=gate[1], ry=rt, plot=False)
if t_bin > 1:
xg = self.rebin_histogram(xg, t_bin,
False, False)
yg = self.rebin_histogram(yg, t_bin)
yb = self.rebin_histogram(yb, t_bin)
dyg = numpy.sqrt(yg)
dyb = numpy.sqrt(yb)
y = yg - yb
dy = numpy.sqrt(dyg**2 + dyb**2)
if clear:
self.clear()
plt.errorbar(xg, y, yerr=dy, ls='None', marker='o')
plt.axhline(0, ls='-', color='black')
def fit_gamma_decay(self, his_id, gate, cycle,
t_bin=1, rt=None,
model='grow_decay',
pars=None,
clear=True):
"""Fits gamma decay time profile,
his_id is E-time histogram id
gate should be given in format:
        ((x0, x1), (bg0, bg1))
cycle is list of beam start, beam stop, cycle end, e.g.
(0, 100, 300)
t_bin is a binning parameter
rt is a gate in time in (t0, t1) format
model is model used for fit (see decay_fitter)
pars is a list of dictionaries (one dict per each parameter)
"""
if pars is None:
T0 = {'name' : 'T0', 'value' : cycle[0], 'vary' : False}
T1 = {'name' : 'T1', 'value' : cycle[1], 'vary' : False}
T2 = {'name' : 'T2', 'value' : cycle[2], 'vary' : False}
P1 = {'name' : 'P1', 'value' : 100.0}
t1 = {'name' : 't1', 'value' : 100.0}
parameters = [T0, T1, T2, P1, t1]
if model == 'grow_decay2':
P2 = {'name' : 'P2', 'value' : 1000.0}
t2 = {'name' : 't2', 'value' : 1000.0}
parameters.append(P2)
parameters.append(t2)
else:
parameters = pars
df = DecayFitter()
xg, yg = self.gx(his_id, rx=gate[0], ry=rt, plot=False)
xb, yb = self.gx(his_id, rx=gate[1], ry=rt, plot=False)
if t_bin > 1:
xg = self.rebin_histogram(xg, t_bin,
False, False)
yg = self.rebin_histogram(yg, t_bin)
yb = self.rebin_histogram(yb, t_bin)
dyg = numpy.sqrt(yg)
dyb = numpy.sqrt(yb)
y = yg - yb
dy = numpy.sqrt(dyg**2 + dyb**2)
t, n, parameters = df.fit(xg, y, dy, model, parameters)
if clear:
self.clear()
plt.errorbar(xg, y, yerr=dy, ls='None', marker='o')
plt.plot(t, n, ls='-', color='red')
plt.axhline(0, ls='-', color='black')
return (t, n, parameters)
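    # Hedged usage sketch (illustrative values, not from the original source):
    # fit the decay curve of a line gated on channels 510-514 with background
    # 516-520, for a 0/100/300 s beam cycle and 4-channel time binning; "e" is
    # an Experiment instance and 2681 a hypothetical E-time histogram id.
    #
    #   t, n, pars = e.fit_gamma_decay(2681, ((510, 514), (516, 520)),
    #                                  (0, 100, 300), t_bin=4)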
def fit_peaks(self, rx, his_id=None):
"""Fit gaussian peaks to current plot.
Returns list of lists:
[E, x0, dx, A, dA, s, Area]
where E is name of the peak, x0, A and s are fitted parameters
and d'something' is its uncertainity. Area is total calculated area.
"""
peaks = []
for p in self.peaks:
if rx[0] <= p.get('E') <= rx[1]:
peaks.append(p)
PF = PeakFitter(peaks, 'linear', '')
if his_id is not None:
data = self.hisfile.load_histogram(his_id)
if data[0] != 1:
print('{} is not a 1D histogram'.format(his_id))
return None
x_axis = data[1][rx[0]:rx[1]]
data_y = data[3][rx[0]:rx[1]]
else:
x_axis = self.current['x'][rx[0]:rx[1]]
data_y = self.current['y'][rx[0]:rx[1]]
data_dy = numpy.zeros(len(data_y))
for iy, y in enumerate(data_y):
if y > 0:
data_dy[iy] = math.sqrt(y)
else:
data_dy[iy] = 1
PF.fit(x_axis, data_y, data_dy)
print('#{:^7} {:^8} {:^8} {:^8} {:^8} {:^8} {:^8}'
.format('Peak', 'x0', 'dx', 'A', 'dA', 's', 'Area'))
peak_data = []
for i, peak in enumerate(peaks):
if peak.get('ignore') == 'True':
continue
x0 = PF.params['x{}'.format(i)].value
dx = PF.params['x{}'.format(i)].stderr
A = PF.params['A{}'.format(i)].value
dA = PF.params['A{}'.format(i)].stderr
s = PF.params['s{}'.format(i)].value
Area = PF.find_area(x_axis, i)
print('{:>8} {:>8.2f} {:>8.2f} {:>8.1f} {:>8.1f} {:>8.3f} {:>8.1f}'
.format(peaks[i].get('E'), x0, dx, A, dA, s, Area))
peak_data.append([peaks[i].get('E'), x0, dx, A, dA, s, Area])
return peak_data
def pk(self, E, **kwargs):
p = {'E' : E}
p.update(kwargs)
self.peaks.append(p)
def pzot(self):
self.peaks.clear()
if __name__ == "__main__":
pass
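# Hedged usage sketch (illustrative only; the file name and histogram ids are
# hypothetical): a typical interactive session with this module.
#
#   e = Experiment('data.his')
#   e.list()                     # list available histograms
#   e.d(1001)                    # plot 1D histogram 1001
#   e.dl(0, 2000); e.log()       # zoom in and switch to a log scale
#   e.pk(511); e.pk(1460)        # declare peaks to fit
#   e.fit_peaks((480, 1500))     # fit them on the current spectrum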
| gpl-3.0 |
jeremiedecock/pyarm | setup.py | 1 | 4441 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PyArm
# The MIT License
#
# Copyright (c) 2010,2015 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Here is the procedure to submit updates to PyPI
# ===============================================
#
# 1. Register to PyPI:
#
# $ python3 setup.py register
#
# 2. Upload the source distribution:
#
# $ python3 setup.py sdist upload
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
from pyarm import __version__ as VERSION
# See : http://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 4 - Beta',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks']
KEYWORDS = 'robotics simulation library'
# You can either specify manually the list of packages to include in the
# distribution or use "setuptools.find_packages()" to include them
# automatically with a recursive search (from the root directory of the
# project).
#PACKAGES = find_packages()
PACKAGES = ['pyarm',
'pyarm.agent',
'pyarm.gui',
'pyarm.model',
'pyarm.model.arm',
'pyarm.model.kinematics',
'pyarm.model.muscle']
# The following list contains all dependencies that Python will try to
# install with this project
INSTALL_REQUIRES = ['numpy', 'matplotlib']
#INSTALL_REQUIRES = []
SCRIPTS = ["scripts/pyarm",
"scripts/pyarm-plot",
"scripts/pyarm-test-model"]
# Entry point can be used to create plugins or to automatically generate
# system commands to call specific functions.
# Syntax: "name_of_the_command_to_make = package.module:function".
ENTRY_POINTS = {}
#ENTRY_POINTS = {
# 'console_scripts': [
# 'pyarmgui = pyarm.gui:run',
# ],
#}
README_FILE = 'README.rst'
def get_long_description():
with open(README_FILE, 'r') as fd:
desc = fd.read()
return desc
setup(author='Jeremie DECOCK',
author_email='[email protected]',
maintainer='Jeremie DECOCK',
maintainer_email='[email protected]',
name='pyarm',
description='A robotic arm model and simulator.',
long_description=get_long_description(),
url='http://www.jdhp.org/',
download_url='http://www.jdhp.org/',# Where the package can be downloaded
classifiers=CLASSIFIERS,
#license='MIT', # Useless if license is already in CLASSIFIERS
keywords=KEYWORDS,
packages=PACKAGES,
include_package_data=True, # Use the MANIFEST.in file
install_requires=INSTALL_REQUIRES,
#platforms=['Linux'],
#requires=['numpy', 'matplotlib'],
scripts=SCRIPTS,
entry_points=ENTRY_POINTS,
version=VERSION)
| mit |
kcavagnolo/astroML | book_figures/chapter10/fig_autocorrelation.py | 3 | 3015 | """
Autocorrelation Function
------------------------
Figure 10.30
Example of the autocorrelation function for a stochastic process. The top panel
shows a simulated light curve generated using a damped random walk model
(Section 10.5.4). The bottom panel shows the corresponding autocorrelation
function computed using Edelson and Krolik's DCF method and the Scargle method.
The solid line shows the input autocorrelation function used to generate the
light curve.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.time_series import lomb_scargle, generate_damped_RW
from astroML.time_series import ACF_scargle, ACF_EK
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Generate time-series data:
# we'll do 1000 days worth of magnitudes
t = np.arange(0, 1E3)
z = 2.0
tau = 300
tau_obs = tau / (1. + z)
np.random.seed(6)
y = generate_damped_RW(t, tau=tau, z=z, xmean=20)
# randomly sample 100 of these
ind = np.arange(len(t))
np.random.shuffle(ind)
ind = ind[:100]
ind.sort()
t = t[ind]
y = y[ind]
# add errors
dy = 0.1
y_obs = np.random.normal(y, dy)
#------------------------------------------------------------
# compute ACF via scargle method
C_S, t_S = ACF_scargle(t, y_obs, dy,
n_omega=2. ** 12, omega_max=np.pi / 5.0)
ind = (t_S >= 0) & (t_S <= 500)
t_S = t_S[ind]
C_S = C_S[ind]
#------------------------------------------------------------
# compute ACF via E-K method
C_EK, C_EK_err, bins = ACF_EK(t, y_obs, dy, bins=np.linspace(0, 500, 51))
t_EK = 0.5 * (bins[1:] + bins[:-1])
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
# plot the input data
ax = fig.add_subplot(211)
ax.errorbar(t, y_obs, dy, fmt='.k', lw=1)
ax.set_xlabel('t (days)')
ax.set_ylabel('observed flux')
# plot the ACF
ax = fig.add_subplot(212)
ax.plot(t_S, C_S, '-', c='gray', lw=1,
label='Scargle')
ax.errorbar(t_EK, C_EK, C_EK_err, fmt='.k', lw=1,
label='Edelson-Krolik')
ax.plot(t_S, np.exp(-abs(t_S) / tau_obs), '-k', label='True')
ax.legend(loc=3)
ax.plot(t_S, 0 * t_S, ':', lw=1, c='gray')
ax.set_xlim(0, 500)
ax.set_ylim(-1.0, 1.1)
ax.set_xlabel('t (days)')
ax.set_ylabel('ACF(t)')
plt.show()
| bsd-2-clause |
B3AU/waveTree | examples/linear_model/plot_ard.py | 8 | 2588 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import pylab as pl
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
pl.figure(figsize=(6, 5))
pl.title("Weights of the model")
pl.plot(clf.coef_, 'b-', label="ARD estimate")
pl.plot(ols.coef_, 'r--', label="OLS estimate")
pl.plot(w, 'g-', label="Ground truth")
pl.xlabel("Features")
pl.ylabel("Values of the weights")
pl.legend(loc=1)
pl.figure(figsize=(6, 5))
pl.title("Histogram of the weights")
pl.hist(clf.coef_, bins=n_features, log=True)
pl.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
pl.ylabel("Features")
pl.xlabel("Values of the weights")
pl.legend(loc=1)
pl.figure(figsize=(6, 5))
pl.title("Marginal log-likelihood")
pl.plot(clf.scores_)
pl.ylabel("Score")
pl.xlabel("Iterations")
pl.show()
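# Optional check (not part of the original example): inspect the learned
# hyperparameters. After fitting, ARDRegression exposes the estimated noise
# precision ``alpha_`` and one precision per weight in ``lambda_``; weights of
# irrelevant features are typically pruned to zero.
print("Estimated noise precision alpha_: %.3f" % clf.alpha_)
print("Non-zero ARD weights: %d / %d" % (np.sum(clf.coef_ != 0), n_features))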
| bsd-3-clause |
davisincubator/seal_the_deal | notebooks/gen_preds_2.py | 1 | 7881 | import numpy as np
import pandas as pd
import os
import cv2
from PIL import Image
from scipy.misc import imread
import matplotlib.pyplot as plt
import skimage.feature
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import keras
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, Lambda, Cropping2D
from keras.utils import np_utils
import tensorflow as tf
from collections import Counter
from keras.models import load_model
import datetime
from tqdm import tnrange, tqdm_notebook, tqdm
# % matplotlib
# inline
class_names = ['adult_females', 'adult_males', 'juveniles', 'pups', 'subadult_males']
# my_dir = "/Volumes/dax/seals/Kaggle-NOAA-SeaLions/"
my_dir = "/seal_the_data/"
mismatch_id = [3, 7, 9, 21, 30, 34, 71, 81, 89, 97, 151, 184, 215, 234, 242, 268, 290, 311, 331, 344, 380, 384, 406,
421, 469, 475, 490, 499, 507, 530, 531, 605, 607, 614, 621, 638, 644, 687, 712, 721, 767, 779, 781, 794,
800, 811, 839, 840, 869, 882, 901, 903, 905, 909, 913, 927, 946]
blacklist = []
for i in mismatch_id:
blacklist.append(str(i) + '.jpg')
print(blacklist[:5])
blacklist.append('train.csv')
print(blacklist)
file_names = os.listdir(my_dir + "Train/")
file_names = sorted(file_names, key=lambda
item: (int(item.partition('.')[0]) if item[0].isdigit() else float('inf'), item))
# select a subset of files to run on
file_names = file_names[0:1]
# dataframe to store results in
coordinates_df = pd.DataFrame(index=file_names, columns=class_names)
# print(file_names[:])
for filename in list(file_names):  # iterate over a copy so removing blacklisted files does not skip entries
if filename in blacklist:
file_names.remove(filename)
else:
# read the Train and Train Dotted images
image_1 = cv2.imread(my_dir + "/TrainDotted/" + filename)
image_2 = cv2.imread(my_dir + "/Train/" + filename)
cut = np.copy(image_2)
# absolute difference between Train and Train Dotted
image_3 = cv2.absdiff(image_1, image_2)
# mask out blackened regions from Train Dotted
mask_1 = cv2.cvtColor(image_1, cv2.COLOR_BGR2GRAY)
mask_1[mask_1 < 20] = 0
mask_1[mask_1 > 0] = 255
mask_2 = cv2.cvtColor(image_2, cv2.COLOR_BGR2GRAY)
mask_2[mask_2 < 20] = 0
mask_2[mask_2 > 0] = 255
image_3 = cv2.bitwise_or(image_3, image_3, mask=mask_1)
image_3 = cv2.bitwise_or(image_3, image_3, mask=mask_2)
# convert to grayscale to be accepted by skimage.feature.blob_log
image_3 = cv2.cvtColor(image_3, cv2.COLOR_BGR2GRAY)
# detect blobs
blobs = skimage.feature.blob_log(image_3, min_sigma=3, max_sigma=4, num_sigma=1, threshold=0.02)
adult_males = []
subadult_males = []
pups = []
juveniles = []
adult_females = []
image_circles = image_1
for blob in blobs:
# get the coordinates for each blob
y, x, s = blob
# get the color of the pixel from Train Dotted in the center of the blob
g, b, r = image_1[int(y)][int(x)][:]
# decision tree to pick the class of the blob by looking at the color in Train Dotted
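            # Dot colour -> class mapping used below: red = adult males,
            # magenta = subadult males, green = pups, blue = juveniles,
            # brown = adult females (as in the Kaggle NOAA Sea Lions dotted images).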
if r > 200 and g < 50 and b < 50: # RED
adult_males.append((int(x), int(y)))
cv2.circle(image_circles, (int(x), int(y)), 20, (0, 0, 255), 10)
elif r > 200 and g > 200 and b < 50: # MAGENTA
subadult_males.append((int(x), int(y)))
cv2.circle(image_circles, (int(x), int(y)), 20, (250, 10, 250), 10)
elif r < 100 and g < 100 and 150 < b < 200: # GREEN
pups.append((int(x), int(y)))
cv2.circle(image_circles, (int(x), int(y)), 20, (20, 180, 35), 10)
elif r < 100 and 100 < g and b < 100: # BLUE
juveniles.append((int(x), int(y)))
cv2.circle(image_circles, (int(x), int(y)), 20, (180, 60, 30), 10)
elif r < 150 and g < 50 and b < 100: # BROWN
adult_females.append((int(x), int(y)))
cv2.circle(image_circles, (int(x), int(y)), 20, (0, 42, 84), 10)
cv2.rectangle(cut, (int(x) - 112, int(y) - 112), (int(x) + 112, int(y) + 112), 0, -1)
coordinates_df["adult_males"][filename] = adult_males
coordinates_df["subadult_males"][filename] = subadult_males
coordinates_df["adult_females"][filename] = adult_females
coordinates_df["juveniles"][filename] = juveniles
coordinates_df["pups"][filename] = pups
x = []
y = []
for filename in tqdm(file_names):
image = cv2.imread(my_dir + "/Train/" + filename)
for lion_class in class_names:
try:
for coordinates in coordinates_df[lion_class][filename]:
thumb = image[coordinates[1] - 32:coordinates[1] + 32, coordinates[0] - 32:coordinates[0] + 32, :]
if np.shape(thumb) == (64, 64, 3):
x.append(thumb)
y.append(lion_class)
except:
pass
for i in range(0, np.shape(cut)[0], 224):
for j in range(0, np.shape(cut)[1], 224):
thumb = cut[i:i + 64, j:j + 64, :]
if np.amin(cv2.cvtColor(thumb, cv2.COLOR_BGR2GRAY)) != 0:
if np.shape(thumb) == (64, 64, 3):
x.append(thumb)
y.append("negative")
class_names.append("negative")
x = np.array(x)
y = np.array(y)
encoder = LabelBinarizer()
encoder.fit(y)
y = encoder.transform(y).astype(float)
my_model = '2017-06-25_model.h5' # what is the model file named?
model = load_model(my_dir + my_model)
test_file_names = os.listdir(my_dir + "Test/")
test_file_names = sorted(test_file_names, key=lambda
item: (int(item.partition('.')[0]) if item[0].isdigit() else float('inf'), item))
# select a subset of files to run on
# test_file_names = test_file_names[0:7]
print(len(test_file_names)) # 18636
#test_file_names = test_file_names[0:2000]
test_file_names = test_file_names[2000:4000]
# test_file_names = test_file_names[4000:6000]
# test_file_names = test_file_names[6000:8000]
# test_file_names = test_file_names[8000:10000]
# test_file_names = test_file_names[10000:12000]
# test_file_names = test_file_names[12000:14000]
# test_file_names = test_file_names[14000:]
print(len(test_file_names)) #
# dataframe to store results in
test_coordinates_df = pd.DataFrame(0, index=test_file_names, columns=class_names)
# print(test_file_names[:5])
# print(test_coordinates_df)
# GPU 2
with tf.device('/gpu:1'):
for filename in tqdm(test_file_names):
file_int = int(filename[:-4])
current_time = datetime.datetime.now().time().isoformat()[:5]
if file_int % 500 == 0:
print('completed %d images at %s' % (file_int, current_time))
img = cv2.imread(my_dir + "Test/" + filename)
x_test = []
for i in range(0, np.shape(img)[0], 64):
for j in range(0, np.shape(img)[1], 64):
thumb = img[i:i + 64, j:j + 64, :]
if np.shape(thumb) == (64, 64, 3):
x_test.append(thumb)
x_test = np.array(x_test)
y_predicted = model.predict(x_test, verbose=0)
y_predicted = encoder.inverse_transform(y_predicted)
the_counter = Counter(y_predicted)
# print(the_counter)
for key in the_counter:
test_coordinates_df.set_value(index=filename, col=key, value=the_counter[key])
protect_df = test_coordinates_df
# print(test_coordinates_df)
del test_coordinates_df['negative']
test_coordinates_df = test_coordinates_df[['adult_males', 'subadult_males', 'adult_females', 'juveniles', 'pups']]
print(test_coordinates_df)
test_coordinates_df.to_csv(my_dir + datetime.date.today().isoformat() + '_submission_pt2.csv') | mit |
murali-munna/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
licco/zipline | zipline/utils/tradingcalendar_bmf.py | 5 | 7753 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from datetime import datetime
from dateutil import rrule
from zipline.utils.tradingcalendar import end, canonicalize_datetime
start = pd.Timestamp('1994-01-01', tz='UTC')
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
    # Universal Confraternization (New Year's Day)
conf_universal = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(conf_universal)
# Sao Paulo city birthday
aniversario_sao_paulo = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(aniversario_sao_paulo)
# Carnival Monday
carnaval_segunda = rrule.rrule(
rrule.MONTHLY,
byeaster=-48,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(carnaval_segunda)
# Carnival Tuesday
carnaval_terca = rrule.rrule(
rrule.MONTHLY,
byeaster=-47,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(carnaval_terca)
# Passion of the Christ
sexta_paixao = rrule.rrule(
rrule.MONTHLY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(sexta_paixao)
# Corpus Christi
corpus_christi = rrule.rrule(
rrule.MONTHLY,
byeaster=60,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(corpus_christi)
tiradentes = rrule.rrule(
rrule.MONTHLY,
bymonth=4,
bymonthday=21,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(tiradentes)
# Labor day
dia_trabalho = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
bymonthday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(dia_trabalho)
# Constitutionalist Revolution
constitucionalista = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=9,
cache=True,
dtstart=datetime(1997, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(constitucionalista)
    # Independence Day
independencia = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
bymonthday=7,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(independencia)
# Our Lady of Aparecida
aparecida = rrule.rrule(
rrule.MONTHLY,
bymonth=10,
bymonthday=12,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(aparecida)
# All Souls' day
finados = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
bymonthday=2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(finados)
# Proclamation of the Republic
proclamacao_republica = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
bymonthday=15,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(proclamacao_republica)
# Day of Black Awareness
consciencia_negra = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
bymonthday=20,
cache=True,
dtstart=datetime(2004, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(consciencia_negra)
# Christmas Eve
vespera_natal = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(vespera_natal)
# Christmas
natal = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(natal)
# New Year Eve
ano_novo = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=31,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(ano_novo)
    # New Year's Eve falling on a Saturday: the preceding Friday (Dec 30) is also a non-trading day
ano_novo_sab = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=30,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(ano_novo_sab)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
# Ash Wednesday
quarta_cinzas = rrule.rrule(
rrule.MONTHLY,
byeaster=-46,
cache=True,
dtstart=start,
until=end
)
def get_early_closes(start, end):
    # The only early-close rule tracked for Bovespa is Ash Wednesday, which is
    # really a late open: trading starts at 1:00 PM.
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
early_close_rules = []
early_close_rules.append(quarta_cinzas)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_closes(trading_days, early_closes):
open_and_closes = pd.DataFrame(index=trading_days,
columns=('market_open', 'market_close'))
for day in trading_days:
# only "early close" event in Bovespa actually is a late start
# as the market only opens at 1pm
open_hour = 13 if day in quarta_cinzas else 10
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=open_hour,
minute=00),
tz='America/Sao_Paulo').tz_convert('UTC')
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=16),
tz='America/Sao_Paulo').tz_convert('UTC')
open_and_closes.loc[day, 'market_open'] = market_open
open_and_closes.loc[day, 'market_close'] = market_close
return open_and_closes
open_and_closes = get_open_and_closes(trading_days, early_closes)
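# Usage sketch (illustrative): the module-level objects built above can be used
# directly, e.g. trading_days[:5] gives the first Bovespa sessions from 1994 as
# UTC timestamps, and open_and_closes.loc[day, 'market_open'] gives the UTC
# open of a given session.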
| apache-2.0 |
googlearchive/rgc-models | prosthesis/stimulation_pgd.py | 1 | 25671 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stimulation algorithm for prosthesis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import scipy
import pickle
import copy
import os
import cvxpy
import tensorflow as tf
from tensorflow.python.platform import gfile
tf.flags.DEFINE_string('Algorithm', 'simultaneous_planning',
'Planning algorithm to use')
tf.flags.DEFINE_float('learning_rate',
100,
'Learning rate for optimization.')
tf.flags.DEFINE_integer('t_max',
20,
'Maximum number of stimulations')
tf.flags.DEFINE_integer('delta',
5,
'Maximum number of stimulations')
tf.flags.DEFINE_string('normalization',
'C',
'Normalization ')
tf.flags.DEFINE_string('save_dir',
'/home/bhaishahster/stimulation_algos/pgd/',
'Directory to store results.')
FLAGS = flags.FLAGS
def main(unused_argv=()):
src = '/home/bhaishahster/Stimulation_data.pkl'
data = pickle.load(gfile.Open(src, 'r'))
S_collection = data['S'] # Target
A = data['A'] # Decoder
D = data['D'].T # Dictionary
for itarget in range(S_collection.shape[1]):
S = S_collection[:, itarget]
# Run Greedy first to initialize
if FLAGS.Algorithm == 'greedy':
x_greedy = greedy_stimulation(S, A, D, max_stims = FLAGS.t_max * FLAGS.delta,
file_suffix='%d' % itarget, save=True, save_dir=FLAGS.save_dir)
if FLAGS.Algorithm == 'simultaneous_planning':
x_greedy = greedy_stimulation(S, A, D, max_stims = FLAGS.t_max * FLAGS.delta,
file_suffix='%d' % itarget, save=False, save_dir=FLAGS.save_dir)
# Plan for multiple time points
x_init = np.zeros((x_greedy.shape[0], FLAGS.t_max))
#from IPython import embed; embed()
for it in range(FLAGS.t_max):
print((it + 1) * FLAGS.delta - 1)
x_init[:, it] = x_greedy[:, (it + 1) * FLAGS.delta - 1]
simultaneous_planning(S, A, D, t_max=FLAGS.t_max, lr=FLAGS.learning_rate,
delta=FLAGS.delta, normalization=FLAGS.normalization,
file_suffix='%d' % itarget, x_init=x_init, save_dir=FLAGS.save_dir)
if FLAGS.Algorithm == 'simultaneous_planning_cvx':
simultaneous_planning_cvx(S, A, D, t_max=FLAGS.t_max,
delta=FLAGS.delta,
file_suffix='%d' % itarget, save_dir=FLAGS.save_dir)
def greedy_stimulation(S, A, D, save_dir='', max_stims = 100, file_suffix='', save=False):
'''Greedily select stimulation pattern for each step.'''
n_dict_elem = D.shape[1]
# compute variance of dictionary elements.
stas_norm = np.expand_dims(np.sum(A ** 2, 0) ,0) # 1 x # cells
var_dict = np.squeeze(np.dot(stas_norm, D * (1 - D))) # # dict
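  # var_dict[j] is the expected extra squared error contributed by Bernoulli
  # spiking variability of dictionary element j:
  # sum_c ||A[:, c]||^2 * p_cj * (1 - p_cj), assuming D stores per-cell firing
  # probabilities in [0, 1].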
AD = A.dot(D)
x = np.zeros(n_dict_elem)
current_mean_percept = A.dot(D.dot(x))
x_chosen = np.zeros((n_dict_elem, max_stims))
for istim in range(max_stims):
print(istim)
errs = np.sum((np.expand_dims(S - current_mean_percept, 1) - AD) ** 2, 0) + var_dict
chosen_dict = np.argmin(errs)
min_e_d = errs[chosen_dict]
'''
# Compute objective value
min_e_d = np.inf
for idict in range(n_dict_elem):
diff = S - current_mean_percept - AD[:, idict]
error = np.sum(diff ** 2, 0) + var_dict[idict]
if error < min_e_d:
chosen_dict = idict
min_e_d = error
'''
x[chosen_dict] += 1
current_mean_percept = A.dot(D.dot(x))
x_chosen[chosen_dict, istim] = 1
# Final Error
x_chosen = np.cumsum(x_chosen, 1)
error_curve = compute_error(S, A, D, var_dict, x_chosen)
if save:
save_dict = {'error_curve': error_curve, 'x_chosen': x_chosen, 'x': x}
pickle.dump(save_dict,
gfile.Open(os.path.join(save_dir,
'greedy_%d_%s.pkl' %
(max_stims, file_suffix)),
'w'))
return x_chosen
def compute_error(S, A, D, var_dict, x_chosen):
diff = np.expand_dims(S, 1) - A.dot(D.dot(x_chosen))
return np.sum(diff ** 2, 0) + np.dot(var_dict, x_chosen)
def simultaneous_planning_cvx(S, A, D, t_max = 2000, delta = 5,
file_suffix='', save_dir=''):
# Setup problem parameters
# make p_tau uniform between 500 and 2000
p_tau = np.ones(t_max)
p_tau[:5] = 0
p_tau = p_tau / np.sum(p_tau)
n_dict_elem = D.shape[1]
# compute variance of dictionary elements.
stas_norm = np.expand_dims(np.sum(A ** 2, 0) ,0) # 1 x # cells
var_dict = np.squeeze(np.dot(stas_norm, D * (1 - D))) # # dict
# Construct the problem.
y = cvxpy.Variable(n_dict_elem, t_max)
x = cvxpy.cumsum(y, 1)
S_expanded = np.repeat(np.expand_dims(S, 1), t_max, 1)
objective = cvxpy.Minimize((cvxpy.sum_entries((S_expanded - A * (D * x))**2, 0) + var_dict * x) * p_tau)
constraints = [0 <= y, cvxpy.sum_entries(y, 0).T <= delta * np.ones((1, t_max)).T]
prob = cvxpy.Problem(objective, constraints)
# The optimal objective is returned by prob.solve().
result = prob.solve(verbose=True)
# The optimal value for x is stored in x.value.
print(x.value)
# The optimal Lagrange multiplier for a constraint
# is stored in constraint.dual_value.
print(constraints[0].dual_value)
def simultaneous_planning(S, A, D, save_dir='', t_max = 2000, lr=0.01,
normalization='T-i', delta = 5,
file_suffix='', x_init=None):
''' Solve the simultaneous planning constrained optimization problem.
Let xi be the set of electrodes played till time i.
Let the distribution of saccades be p(tau).
Optimization problem.
Min E_tau ||S - ADx_tau||^2
subject to -
x_{i+1} >= x_{i} forall i
|x_i|_1 <= i forall i
x_i >= 0 forall i.
Solve using projected gradient descent.
'''
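  # Implementation note: the optimization is carried out over per-step
  # increments y, with x_t the cumulative sum of the normalized columns of y.
  # The monotonicity constraint x_{i+1} >= x_i then reduces to y >= 0, and the
  # per-step stimulation budget becomes a column-wise L1 constraint that is
  # enforced by project_l1_pos.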
# Compute expanded quantities
S_expanded = np.repeat(np.expand_dims(S, 1), t_max, 1)
if normalization == 'T-i':
normalizing_factors = np.array([t_max - i for i in range(t_max)])
if normalization == 'sqrt(T-i)':
normalizing_factors = np.sqrt(np.array([t_max - i for i in range(t_max)]))
if normalization == 'C':
normalizing_factors = (t_max / 2) + 0 * np.array([t_max - i for i in range(t_max)])
# make p_tau uniform between 500 and 2000
p_tau = np.ones(t_max)
# TODO(bhaishahster): Dont hardcode p_tau!!
p_tau[:5] = 0
p_tau = p_tau / np.sum(p_tau)
n_dict_elem = D.shape[1]
# TODO(bhaishahster): Find better initialization.
# Initialize
if x_init is not None:
y_init_normalized = np.zeros_like(x_init) # successive difference of x.
y_init_normalized[:, 0] = x_init[:, 0]
for iy in np.arange(1, y_init_normalized.shape[1]):
y_init_normalized[:, iy] = x_init[:, iy] - x_init[:, iy - 1]
y_init = y_init_normalized * np.expand_dims(normalizing_factors, 0) #
else:
# Zero initialization
y_init = np.zeros((n_dict_elem, t_max))
# Smarter initialization
#x_init = np.linalg.pinv(D).dot(np.linalg.pinv(A).dot(S_expanded))
#x_init = project_constraints(x_init)
#
# Do projected gradient descent
y = y_init.copy()
f_log = []
# compute variance of dictionary elements.
stas_norm = np.expand_dims(np.sum(A ** 2, 0) ,0) # 1 x # cells
var_dict = np.squeeze(np.dot(stas_norm, D * (1 - D))) # # dict
radii_normalized = (np.ones(y.shape[1])) * delta
radii = np.multiply(normalizing_factors, radii_normalized )
x_log = []
y_best = []
f_min = np.inf
for iiter in range(4000):
if iiter % 500 == 499:
lr = lr * 0.3
# compute x from y
y_normalized = y / np.expand_dims(normalizing_factors, 0)
x = np.cumsum(y_normalized, 1)
x_log += [x]
# Compute objective value
diff = S_expanded - A.dot(D.dot(x))
errors = np.sum(diff ** 2, 0) + np.dot(var_dict, x)
f = errors.dot(p_tau)
print('Iterate: %d, Function value : %.3f' % (iiter, f))
f_log += [f]
if f < f_min:
f_min = f
y_best = y
# Gradients step
grad = (D.T.dot(A.T.dot((S_expanded - A.dot(D.dot(x))))) - np.expand_dims(var_dict, 1)) * np.expand_dims(p_tau, 0)
# collect gradient for each y. - new formulation that Kunal suggested.
grad_y = np.cumsum(grad[:, ::-1], 1)
grad_y = grad_y[:, ::-1] / np.expand_dims(normalizing_factors, 0)
# y = y + (lr / np.sqrt(iiter + 1)) * grad_y
y = y + (lr) * grad_y
# Project to constraint set
y = project_l1_pos(y, radii)
'''
if iiter > 2:
if np.abs(f_log[-2] - f_log[-1]) < 1e-5:
# compute x from y
y_normalized = y / np.expand_dims(normalizing_factors, 0)
x = np.cumsum(y_normalized, 1)
break
'''
y = y_best
y_normalized = y / np.expand_dims(normalizing_factors, 0)
x = np.cumsum(y_normalized, 1)
diff = S_expanded - A.dot(D.dot(x))
errors = np.sum(diff ** 2, 0) + np.dot(var_dict, x)
# Randomized rounding
x_rr_discrete = randomized_rounding(x)
errors_rr_discrete = compute_error(S, A, D, var_dict, x_rr_discrete)
# Hard thresholding
y_ht_discrete = hard_thresholding(y, radii)
y_ht_discrete_normalized = y_ht_discrete / np.expand_dims(normalizing_factors, 0)
x_ht_discrete = np.cumsum(y_ht_discrete_normalized, 1)
errors_ht_discrete = compute_error(S, A, D, var_dict, x_ht_discrete)
x_log = np.array(x_log)
x_decrease = np.sum((x_log - x_log[-1, :, :]) ** 2, 1)
x_dec_best = np.sum((x_log - x[:, :]) ** 2, 1)
x_last = x_log[-1]
save_dict = {'x': x, 'x_rr_discrete': x_rr_discrete,
'x_ht_discrete': x_ht_discrete,
'errors': errors, 'x_decrease': x_decrease,
'x_dec_best': x_dec_best, 'x_last': x_last,
'errors_rr_discrete': errors_rr_discrete,
'errors_ht_discrete': errors_ht_discrete,
'radii_normalized': radii_normalized,
'radii': radii, 'normalizing_factors': normalizing_factors,
'f_log': f_log, 'y': y, 'y_ht_discrete': y_ht_discrete, 'S': S,
'A': A, 'D': D}
pickle.dump(save_dict,
gfile.Open(os.path.join(save_dir,
'pgd_%d_%.6f_%s_%d_%s.pkl' %(t_max, lr,
normalization,
delta,
file_suffix)),
'w'))
def randomized_rounding(x):
'''Randomized rounding.'''
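  # Each entry is rounded up with probability equal to its fractional part
  # (ceil(x - u) with u ~ Uniform(0, 1)), which preserves the expectation.
  # Reusing one threshold per dictionary element across all time steps keeps
  # the rounded sequence non-decreasing in t.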
# Discretize
thresholds = np.random.rand(x.shape[0])
x_discrete = np.zeros_like(x)
for idict in range(x.shape[0]):
for itime in range(x.shape[1]):
x_discrete[idict, itime] = np.ceil(x[idict, itime] - thresholds[idict])
return x_discrete
def hard_thresholding(y, radii):
'''Hard thresholding of y.'''
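  # Greedy discretization: within each column, round up the largest entries
  # first and stop once the per-step budget radii[t] has been met.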
y_discrete = np.zeros_like(y)
for t in range(y.shape[1]):
l1_radius = radii[t]
idx = np.argsort(y[:, t])[::-1]
for iidx in idx:
y_discrete[iidx, t] = np.ceil(y[iidx, t])
if y_discrete[:, t].sum() >= l1_radius:
break
return y_discrete
def project_l1_pos(x, radii):
'''Numpy implementation of L1 projection.'''
  # Project onto the positivity constraint
x = np.maximum(x, 0)
# numpy implementation of L1 projection
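  # Column-wise projection onto the intersection of the positive orthant and
  # the L1 ball of radius radii[t]: subtract a common threshold from every
  # entry and clip at zero. The reversed cumulative sums below locate that
  # threshold without an explicit search over lambda.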
for t in range(x.shape[1]):
l1_radius = radii[t]
if np.sum(x[:, t]) < l1_radius:
continue
vals = np.sort(x[:, t])
    F = np.cumsum(vals[::-1])[::-1]  # reversed cumulative sums, used to locate the threshold lambda efficiently
prev_v = np.min(x[:, t])
for iiv, iv in enumerate(vals):
if iv == 0:
continue
if F[iiv] - (vals.shape[0] - iiv) * iv < l1_radius :
break
prev_v = iv
vals = np.maximum(x[:, t] - prev_v, 0)
violation = np.sum(vals) - l1_radius
nnz = np.sum(vals > 0)
shift = violation / nnz
x[:, t] = np.maximum(vals - shift, 0)
return x
def simultaneous_planning_interleaved_discretization(S, A, D, save_dir='', t_max = 2000, lr=0.01,
normalization='T-i', delta = 5,
file_suffix='', x_init=None, freeze_freq=200, steps_max=3999):
''' Solve the simultaneous planning constrained opt problem with interleaved discretization.
Let xi be the set of electrodes played till time i.
Let the distribution of saccades be p(tau).
Optimization problem.
Min E_tau ||S - ADx_tau||^2
subject to -
x_{i+1} >= x_{i} forall i
|x_i|_1 <= i forall i
x_i >= 0 forall i.
Solve using projected gradient descent.
'''
# Compute expanded quantities
S_expanded = np.repeat(np.expand_dims(S, 1), t_max, 1)
S_normalize = np.sum(S ** 2)
if normalization == 'T-i':
normalizing_factors = np.array([t_max - i for i in range(t_max)])
if normalization == 'sqrt(T-i)':
normalizing_factors = np.sqrt(np.array([t_max - i for i in range(t_max)]))
if normalization == 'C':
normalizing_factors = (t_max / 2) + 0 * np.array([t_max - i for i in range(t_max)])
# make p_tau uniform between 500 and 2000
p_tau = np.ones(t_max)
# TODO(bhaishahster): Dont hardcode p_tau!!
p_tau[:5] = 0
p_tau = p_tau / np.sum(p_tau)
n_dict_elem = D.shape[1]
# TODO(bhaishahster): Find better initialization.
# Initialize
if x_init is not None:
y_init_normalized = np.zeros_like(x_init) # successive difference of x.
y_init_normalized[:, 0] = x_init[:, 0]
for iy in np.arange(1, y_init_normalized.shape[1]):
y_init_normalized[:, iy] = x_init[:, iy] - x_init[:, iy - 1]
y_init = y_init_normalized * np.expand_dims(normalizing_factors, 0) #
else:
# Zero initialization
y_init = np.zeros((n_dict_elem, t_max))
# Smarter initialization
#x_init = np.linalg.pinv(D).dot(np.linalg.pinv(A).dot(S_expanded))
#x_init = project_constraints(x_init)
#
# Do projected gradient descent
y = y_init.copy()
f_log = []
# compute variance of dictionary elements.
stas_norm = np.expand_dims(np.sum(A ** 2, 0) ,0) # 1 x # cells
var_dict = np.squeeze(np.dot(stas_norm, D * (1 - D))) # # dict
radii_normalized = (np.ones(y.shape[1])) * delta
radii = np.multiply(normalizing_factors, radii_normalized )
x_log = []
y_best = []
f_min = np.inf
training_indices = np.arange(y.shape[1])
#grad_sq_log = np.zeros_like(y) + 0.001
to_freeze = False
for iiter in range(steps_max):
'''
if iiter % 500 == 499:
lr = lr * 0.3
'''
if (iiter % freeze_freq == freeze_freq - 1) or to_freeze:
if training_indices.size == 0:
print('Everything frozen, Exiting..')
break
frozen_index = training_indices[0]
print('Freezing %d' % frozen_index)
training_indices = training_indices[1:]
y[:, frozen_index] = np.squeeze(hard_thresholding(np.expand_dims(y[:, frozen_index], 1), np.array([radii[frozen_index]])))
#grad_sq_log = np.zeros_like(y) + 0.001
to_freeze = False
'''
try:
plt.ion()
plt.plot(f_log)
plt.show()
plt.draw()
plt.pause(0.5)
except:
pass
'''
# compute x from y
y_normalized = y / np.expand_dims(normalizing_factors, 0)
x = np.cumsum(y_normalized, 1)
# x_log += [x]
# Compute objective value
diff = S_expanded - A.dot(D.dot(x))
errors = np.sum(diff ** 2, 0) + np.dot(var_dict, x)
f = errors.dot(p_tau)
print('Iterate: %d, Function value : %.7f' % (iiter, f / S_normalize))
f_log += [f]
if f < f_min:
f_min = f
y_best = np.copy(y)
# from IPython import embed; embed()
# Gradients step
grad = (D.T.dot(A.T.dot((S_expanded - A.dot(D.dot(x))))) - np.expand_dims(var_dict, 1)) * np.expand_dims(p_tau, 0)
# collect gradient for each y.
grad_y = np.cumsum(grad[:, ::-1], 1)
grad_y = grad_y[:, ::-1] / np.expand_dims(normalizing_factors, 0)
# y = y + (lr / np.sqrt(iiter + 1)) * grad_y
#y[:, training_indices] = y[:, training_indices] + (lr / np.sqrt((iiter % freeze_freq) + 1)) * grad_y[:, training_indices]
y[:, training_indices] = y[:, training_indices] + (lr) * grad_y[:, training_indices] # fixed learning rate!!
#
# Adagrad
#grad_sq_log += grad_y ** 2
#y[:, training_indices] = y[:, training_indices] + (lr) * (grad_y[:, training_indices] / np.sqrt(grad_sq_log))
# Project to constraint set
if len(y[:, training_indices].shape) > 1:
y[:, training_indices] = project_l1_pos(y[:, training_indices], np.array(radii[training_indices]))
else :
y[:, training_indices] = np.squeeze(project_l1_pos(np.expand_dims(y[:, training_indices], 1), np.array([radii[training_indices]])))
if iiter > 2:
if np.abs(f_log[-2] - f_log[-1]) < 1e-5:
if freeze_freq < np.inf:
to_freeze = True
'''
if iiter > 2:
if np.abs(f_log[-2] - f_log[-1]) < 1e-5:
# compute x from y
y_normalized = y / np.expand_dims(normalizing_factors, 0)
x = np.cumsum(y_normalized, 1)
break
'''
if freeze_freq == np.inf:
print('taking the best value')
y = y_best # -> Take the best value.
y_normalized = y / np.expand_dims(normalizing_factors, 0)
x = np.cumsum(y_normalized, 1)
diff = S_expanded - A.dot(D.dot(x))
errors = np.sum(diff ** 2, 0) + np.dot(var_dict, x)
# Randomized rounding
x_rr_discrete = randomized_rounding(x)
errors_rr_discrete = compute_error(S, A, D, var_dict, x_rr_discrete)
# Hard thresholding
y_ht_discrete = hard_thresholding(y, radii)
y_ht_discrete_normalized = y_ht_discrete / np.expand_dims(normalizing_factors, 0)
x_ht_discrete = np.cumsum(y_ht_discrete_normalized, 1)
errors_ht_discrete = compute_error(S, A, D, var_dict, x_ht_discrete)
#x_log = np.array(x_log)
#x_decrease = np.sum((x_log - x_log[-1, :, :]) ** 2, 1)
#x_dec_best = np.sum((x_log - x[:, :]) ** 2, 1)
#x_last = x_log[-1]
save_dict = {'x': x, 'x_rr_discrete': x_rr_discrete,
'x_ht_discrete': x_ht_discrete,
# 'x_decrease': x_decrease,
# 'x_dec_best': x_dec_best, 'x_last': x_last,
'errors': errors,
'errors_rr_discrete': errors_rr_discrete,
'errors_ht_discrete': errors_ht_discrete,
'radii_normalized': radii_normalized,
'radii': radii, 'normalizing_factors': normalizing_factors,
'f_log': f_log, 'y': y, 'y_ht_discrete': y_ht_discrete, 'S': S,
'A': A, 'D': D}
normalize = np.sum(S ** 2)
#plt.plot(f_log/normalize)
#plt.axhline(errors_ht_discrete[5:].mean()/normalize, color='g')
pickle.dump(save_dict,
gfile.Open(os.path.join(save_dir,
'pgd_%d_%.6f_%s_%d_%s.pkl' %(t_max, lr,
normalization,
delta,
file_suffix)),
'w'))
def simultaneous_planning_interleaved_discretization_exp_gradient(S, A, D, save_dir='', t_max = 2000, lr=0.01,
normalization='T-i', delta = 5,
file_suffix='', x_init=None, freeze_freq=200, steps_max=3999):
''' Solve the simultaneous planning constrained opt problem with interleaved discretization.
Let xi be the set of electrodes played till time i.
Let the distribution of saccades be p(tau).
Optimization problem.
Min E_tau ||S - ADx_tau||^2
subject to -
x_{i+1} >= x_{i} forall i
|x_i|_1 <= i forall i
x_i >= 0 forall i.
  Solve using exponentiated-gradient (multiplicative-weights) updates.
'''
# Compute expanded quantities
S_expanded = np.repeat(np.expand_dims(S, 1), t_max, 1)
S_normalize = np.sum(S ** 2)
# make p_tau uniform between 500 and 2000
p_tau = np.ones(t_max)
# TODO(bhaishahster): Dont hardcode p_tau!!
p_tau[:5] = 0
p_tau = p_tau / np.sum(p_tau)
n_dict_elem = D.shape[1]
# TODO(bhaishahster): Find better initialization.
# Initialize
if x_init is not None:
y_init = np.zeros_like(x_init) # successive difference of x.
y_init[:, 0] = x_init[:, 0]
for iy in np.arange(1, y_init.shape[1]):
y_init[:, iy] = x_init[:, iy] - x_init[:, iy - 1]
# Add a small delta to every element
y_init_ones = np.ones((n_dict_elem, t_max))
y_init_ones = delta * y_init_ones / y_init_ones.sum(0)
alpha = 0.9
y_init = alpha * y_init + (1-alpha) * y_init_ones
y_min = np.min(0.0001 * (1 - alpha) * y_init_ones)
else:
# One initialization
y_init = np.ones((n_dict_elem, t_max))
y_init = delta * y_init / y_init.sum(0)
# Do exponential weighing
y = y_init.copy()
f_log = []
# compute variance of dictionary elements.
stas_norm = np.expand_dims(np.sum(A ** 2, 0) ,0) # 1 x # cells
var_dict = np.squeeze(np.dot(stas_norm, D * (1 - D))) # # dict
radii = (np.ones(y.shape[1])) * delta
x_log = []
y_best = []
f_min = np.inf
training_indices = np.arange(y.shape[1])
to_freeze = False
for iiter in range(steps_max):
if (iiter % freeze_freq == freeze_freq - 1) or to_freeze:
if training_indices.size == 0:
print('Everything frozen, Exiting..')
break
frozen_index = training_indices[0]
print('Freezing %d' % frozen_index)
training_indices = training_indices[1:]
y[:, frozen_index] = np.squeeze(hard_thresholding(np.expand_dims(y[:, frozen_index], 1), np.array([radii[frozen_index]])))
# refresh non-frozen entries
#y_init_ones = np.ones((n_dict_elem, t_max))
#y_init_ones = delta * y_init_ones / y_init_ones.sum(0)
#alpha = 0.9
#y[:, training_indices] = alpha * y[:, training_indices] + (1-alpha) * y_init_ones[:, training_indices]
to_freeze = False
'''
try:
plt.ion()
plt.plot(f_log)
plt.show()
plt.draw()
plt.pause(0.5)
except:
pass
'''
# compute x from y
x = np.cumsum(y, 1)
# x_log += [x]
# Compute objective value
diff = S_expanded - A.dot(D.dot(x))
errors = np.sum(diff ** 2, 0) + np.dot(var_dict, x)
f = errors.dot(p_tau)
print('Iterate: %d, y_min : %.9f, Function value : %.7f' % (iiter, y_min, f / S_normalize))
f_log += [f]
if f < f_min:
f_min = f
y_best = np.copy(y)
# Gradients step
grad = (D.T.dot(A.T.dot((S_expanded - A.dot(D.dot(x))))) - np.expand_dims(var_dict, 1)) * np.expand_dims(p_tau, 0)
# collect gradient for each y.
grad_y = np.cumsum(grad[:, ::-1], 1)
grad_y = grad_y[:, ::-1]
y[:, training_indices] = y[:, training_indices] * np.exp(lr * grad_y[:, training_indices] / delta)
# Keep small elements from going to -inf
y[:, training_indices] = delta * y[:, training_indices] / np.sum(y[:, training_indices], 0) # Keeps y normalized
y[:, training_indices] = np.maximum(y[:, training_indices], y_min)
if iiter > 2:
if np.abs(f_log[-2] - f_log[-1]) < 1e-8:
if freeze_freq < np.inf:
to_freeze = True
if freeze_freq == np.inf:
print('taking the best value')
y = y_best # -> Take the best value.
# use last value of y.
x = np.cumsum(y, 1)
diff = S_expanded - A.dot(D.dot(x))
errors = np.sum(diff ** 2, 0) + np.dot(var_dict, x)
# Randomized rounding
x_rr_discrete = randomized_rounding(x)
errors_rr_discrete = compute_error(S, A, D, var_dict, x_rr_discrete)
# Hard thresholding
y_ht_discrete = hard_thresholding(y, radii)
x_ht_discrete = np.cumsum(y_ht_discrete, 1)
errors_ht_discrete = compute_error(S, A, D, var_dict, x_ht_discrete)
save_dict = {'x': x, 'x_rr_discrete': x_rr_discrete,
'x_ht_discrete': x_ht_discrete,
'errors': errors,
'errors_rr_discrete': errors_rr_discrete,
'errors_ht_discrete': errors_ht_discrete,
'radii': radii,
'f_log': f_log, 'y': y, 'y_ht_discrete': y_ht_discrete, 'S': S,
'A': A, 'D': D}
normalize = np.sum(S ** 2)
#plt.plot(f_log/normalize)
#plt.axhline(errors_ht_discrete[5:].mean()/normalize, color='g')
pickle.dump(save_dict,
gfile.Open(os.path.join(save_dir,
'pgd_%d_%.6f_%s_%d_%s.pkl' %(t_max, lr,
normalization,
delta,
file_suffix)),
'w'))
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
rajat1994/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
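# Optional check (not part of the original example): compare recovered
# sparsity. The true coefficient vector has 10 non-zero entries, and the Lasso
# fit is typically the sparser of the two.
print("Non-zero coefficients - Lasso: %d, Elastic Net: %d"
      % (np.sum(lasso.coef_ != 0), np.sum(enet.coef_ != 0)))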
| bsd-3-clause |
ameya005/ameya005.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    if len(str(item.date)) > 3:  # gate the date line on the date itself, not on location
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
| mit |
mjasher/gac | GAC/flopy/utils/datafile.py | 1 | 16152 | """
Module to read MODFLOW output files. The module contains shared
abstract classes that should not be directly accessed.
"""
from __future__ import print_function
import numpy as np
import flopy.utils
class Header():
"""
The header class is an abstract base class to create headers for MODFLOW files
"""
def __init__(self, filetype=None, precision='single'):
floattype = 'f4'
if precision == 'double':
floattype = 'f8'
self.header_types = ['head', 'ucn']
if filetype is None:
self.header_type = None
else:
self.header_type = filetype.lower()
if self.header_type in self.header_types:
if self.header_type == 'head':
self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'),
('pertim', floattype), ('totim', floattype),
('text', 'a16'),
('ncol', 'i4'), ('nrow', 'i4'), ('ilay', 'i4')])
elif self.header_type == 'ucn':
self.dtype = np.dtype([('ntrans', 'i4'), ('kstp', 'i4'), ('kper', 'i4'),
('totim', floattype), ('text', 'a16'),
('ncol', 'i4'), ('nrow', 'i4'), ('ilay', 'i4')])
self.header = np.ones(1, self.dtype)
else:
self.dtype = None
self.header = None
print('Specified {0} type is not available. Available types are:'.format(self.header_type))
for idx, t in enumerate(self.header_types):
print(' {0} {1}'.format(idx+1, t))
return
def get_dtype(self):
"""
Return the dtype
"""
return self.dtype
def get_names(self):
"""
Return the dtype names
"""
return self.dtype.names
def get_values(self):
"""
Return the header values
"""
if self.header is None:
return None
else:
return self.header[0]
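# A minimal usage sketch for Header (illustrative, not part of the original API docs):
#   hdr = Header(filetype='head', precision='single')
#   hdr.get_names()   # ('kstp', 'kper', 'pertim', 'totim', 'text', 'ncol', 'nrow', 'ilay')
#   hdr.get_values()  # a header record initialized to ones, ready to be filled in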
class LayerFile(object):
"""
The LayerFile class is the abstract base class from which specific derived
    classes are formed. This class should not be instantiated directly.
"""
def __init__(self, filename, precision, verbose, kwargs):
self.filename = filename
self.precision = precision
self.verbose = verbose
self.file = open(self.filename, 'rb')
self.nrow = 0
self.ncol = 0
self.nlay = 0
self.times = []
self.kstpkper = []
self.recordarray = []
self.iposarray = []
if precision == 'single':
self.realtype = np.float32
elif precision == 'double':
self.realtype = np.float64
else:
raise Exception('Unknown precision specified: ' + precision)
self.model = None
self.dis = None
self.sr = None
if 'model' in kwargs.keys():
self.model = kwargs.pop('model')
self.sr = self.model.dis.sr
self.dis = self.model.dis
if 'dis' in kwargs.keys():
self.dis = kwargs.pop('dis')
self.sr = self.dis.sr
if 'sr' in kwargs.keys():
self.sr = kwargs.pop('sr')
if len(kwargs.keys()) > 0:
args = ','.join(kwargs.keys())
raise Exception('LayerFile error: unrecognized kwargs: '+args)
        # read through the file and build the pointer index
self._build_index()
# now that we read the data and know nrow and ncol,
# we can make a generic sr if needed
if self.sr is None:
self.sr = flopy.utils.SpatialReference(np.ones(self.ncol), np.ones(self.nrow), 0)
return
def to_shapefile(self, filename, kstpkper=None, totim=None, mflay=None, attrib_name='lf_data'):
"""
Export model output data to a shapefile at a specific location
in LayerFile instance.
Parameters
----------
filename : str
Shapefile name to write
kstpkper : tuple of ints
A tuple containing the time step and stress period (kstp, kper).
These are zero-based kstp and kper values.
totim : float
The simulation time.
mflay : integer
MODFLOW zero-based layer number to return. If None, then layer 1
will be written
attrib_name : str
Base name of attribute columns. (default is 'lf_data')
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> hdobj = flopy.utils.HeadFile('test.hds')
>>> times = hdobj.get_times()
>>> hdobj.to_shapefile('test_heads_sp6.shp', totim=times[-1])
"""
plotarray = np.atleast_3d(self.get_data(kstpkper=kstpkper,
totim=totim, mflay=mflay)
.transpose()).transpose()
if mflay != None:
attrib_dict = {attrib_name+'{0:03d}'.format(mflay):plotarray[0, :, :]}
else:
attrib_dict = {}
for k in range(plotarray.shape[0]):
name = attrib_name+'{0:03d}'.format(k)
attrib_dict[name] = plotarray[k]
from flopy.utils.flopy_io import write_grid_shapefile
write_grid_shapefile(filename, self.sr, attrib_dict)
def plot(self, axes=None, kstpkper=None, totim=None, mflay=None,
filename_base=None, **kwargs):
'''
Plot 3-D model output data in a specific location
in LayerFile instance
Parameters
----------
axes : list of matplotlib.pyplot.axis
List of matplotlib.pyplot.axis that will be used to plot
data for each layer. If axes=None axes will be generated.
(default is None)
kstpkper : tuple of ints
A tuple containing the time step and stress period (kstp, kper).
These are zero-based kstp and kper values.
totim : float
The simulation time.
mflay : int
MODFLOW zero-based layer number to return. If None, then all
all layers will be included. (default is None)
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
**kwargs : dict
pcolor : bool
Boolean used to determine if matplotlib.pyplot.pcolormesh
plot will be plotted. (default is True)
colorbar : bool
Boolean used to determine if a color bar will be added to
the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
(default is False)
contour : bool
Boolean used to determine if matplotlib.pyplot.contour
plot will be plotted. (default is False)
clabel : bool
Boolean used to determine if matplotlib.pyplot.clabel
will be plotted. Only used if contour=True. (default is False)
grid : bool
Boolean used to determine if the model grid will be plotted
on the figure. (default is False)
masked_values : list
List of unique values to be excluded from the plot.
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> hdobj = flopy.utils.HeadFile('test.hds')
>>> times = hdobj.get_times()
>>> hdobj.plot(totim=times[-1])
'''
if 'file_extension' in kwargs:
fext = kwargs.pop('file_extension')
fext = fext.replace('.', '')
else:
fext = 'png'
filenames = None
if filename_base is not None:
if mflay is not None:
i0 = int(mflay)
if i0+1 >= self.nlay:
i0 = self.nlay - 1
i1 = i0 + 1
else:
i0 = 0
i1 = self.nlay
filenames = ['{}_Layer{}.{}'.format(filename_base, k + 1, fext) for k in range(i0, i1)]
# make sure we have a (lay,row,col) shape plotarray
plotarray = np.atleast_3d(self.get_data(kstpkper=kstpkper,
totim=totim, mflay=mflay)
.transpose()).transpose()
import flopy.plot.plotutil as pu
return pu._plot_array_helper(plotarray, model=self.model, sr=self.sr, axes=axes,
filenames=filenames,
mflay=mflay, **kwargs)
def _build_index(self):
"""
Build the recordarray and iposarray, which maps the header information
to the position in the formatted file.
"""
raise Exception('Abstract method _build_index called in LayerFile. This method needs to be overridden.')
def list_records(self):
"""
Print a list of all of the records in the file
obj.list_records()
"""
for header in self.recordarray:
print(header)
return
def _get_data_array(self, totim=0):
"""
Get the three dimensional data array for the
specified kstp and kper value or totim value.
"""
if totim > 0.:
keyindices = np.where((self.recordarray['totim'] == totim))[0]
else:
raise Exception('Data not found...')
#initialize head with nan and then fill it
data = np.empty((self.nlay, self.nrow, self.ncol),
dtype=self.realtype)
data[:, :, :] = np.nan
for idx in keyindices:
ipos = self.iposarray[idx]
ilay = self.recordarray['ilay'][idx]
if self.verbose:
print('Byte position in file: {0}'.format(ipos))
self.file.seek(ipos, 0)
data[ilay - 1, :, :] = self._read_data()
return data
def get_times(self):
"""
Get a list of unique times in the file
Returns
----------
out : list of floats
List contains unique simulation times (totim) in binary file.
"""
return self.times
def get_kstpkper(self):
"""
Get a list of unique stress periods and time steps in the file
Returns
----------
out : list of (kstp, kper) tuples
List of unique kstp, kper combinations in binary file. kstp and
kper values are presently zero-based.
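Examples
--------
A minimal sketch, assuming a binary head file 'test.hds' is available:
>>> import flopy
>>> hdobj = flopy.utils.HeadFile('test.hds')
>>> kstpkper = hdobj.get_kstpkper()
>>> data = hdobj.get_data(kstpkper=kstpkper[-1])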
"""
kstpkper = []
for kstp, kper in self.kstpkper:
kstpkper.append((kstp - 1, kper - 1))
return kstpkper
def get_data(self, kstpkper=None, idx=None, totim=None, mflay=None):
"""
Get data from the file for the specified conditions.
Parameters
----------
idx : int
The zero-based record number. The first record is record 0.
kstpkper : tuple of ints
A tuple containing the time step and stress period (kstp, kper).
These are zero-based kstp and kper values.
totim : float
The simulation time.
mflay : integer
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (Default is None.)
Returns
----------
data : numpy array
Array has size (nlay, nrow, ncol) if mflay is None or it has size
(nrow, ncol) if mflay is specified.
See Also
--------
Notes
-----
if both kstpkper and totim are None, will return the last entry
Examples
--------
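A minimal sketch, assuming a binary head file 'test.hds' is available:
>>> import flopy
>>> hdobj = flopy.utils.HeadFile('test.hds')
>>> head = hdobj.get_data(kstpkper=(0, 0))
>>> top_layer = hdobj.get_data(totim=hdobj.get_times()[-1], mflay=0)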
"""
# One-based kstp and kper for pulling out of recarray
if kstpkper is not None:
kstp1 = kstpkper[0] + 1
kper1 = kstpkper[1] + 1
totim1 = self.recordarray[np.where(
(self.recordarray['kstp'] == kstp1) &
(self.recordarray['kper'] == kper1))]["totim"][0]
elif totim is not None:
totim1 = totim
elif idx is not None:
totim1 = self.recordarray['totim'][idx]
else:
totim1 = self.times[-1]
data = self._get_data_array(totim1)
if mflay is None:
return data
else:
return data[mflay, :, :]
def get_alldata(self, mflay=None, nodata=-9999):
"""
Get all of the data from the file.
Parameters
----------
mflay : integer
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (Default is None.)
nodata : float
The nodata value in the data array. All array values that have the
nodata value will be assigned np.nan.
Returns
----------
data : numpy array
Array has size (ntimes, nlay, nrow, ncol) if mflay is None or it
has size (ntimes, nrow, ncol) if mflay is specified.
See Also
--------
Notes
-----
Examples
--------
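A minimal sketch, assuming a binary head file 'test.hds' is available:
>>> import flopy
>>> hdobj = flopy.utils.HeadFile('test.hds')
>>> all_heads = hdobj.get_alldata()
>>> top_layer_heads = hdobj.get_alldata(mflay=0)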
"""
rv = []
for totim in self.times:
h = self.get_data(totim=totim, mflay=mflay)
rv.append(h)
rv = np.array(rv)
rv[rv == nodata] = np.nan
return rv
def _read_data(self):
"""
Read data from file
"""
raise Exception('Abstract method _read_data called in LayerFile. This method needs to be overridden.')
def _build_kijlist(self, idx):
if isinstance(idx, list):
kijlist = idx
elif isinstance(idx, tuple):
kijlist = [idx]
else:
raise Exception('Could not build kijlist from unsupported index type: ' + str(type(idx)))
# Check to make sure that k, i, j are within range, otherwise
# the seek approach won't work. Can't use k = -1, for example.
for k, i, j in kijlist:
fail = False
errmsg = 'Invalid cell index. Cell ' + str((k, i, j)) + ' not within model grid: ' + \
str((self.nlay, self.nrow, self.ncol))
if k < 0 or k > self.nlay - 1:
fail = True
if i < 0 or i > self.nrow - 1:
fail = True
if j < 0 or j > self.ncol - 1:
fail = True
if fail:
raise Exception(errmsg)
return kijlist
def _get_nstation(self, idx, kijlist):
if isinstance(idx, list):
return len(kijlist)
elif isinstance(idx, tuple):
return 1
def _init_result(self, nstation):
# Initialize result array and put times in first column
result = np.empty((len(self.times), nstation + 1),
dtype=self.realtype)
result[:, :] = np.nan
result[:, 0] = np.array(self.times)
return result
def close(self):
"""
Close the file handle.
"""
self.file.close()
return
| gpl-2.0 |
shyamalschandra/scikit-learn | examples/svm/plot_svm_regression.py | 120 | 1520 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
danielemichilli/LSPs | src/RFIexcision.py | 1 | 10967 | ########################################
#
# Radio Frequency Interferences excision
#
# Written by Daniele Michilli
#
########################################
import numpy as np
import pandas as pd
import logging
from scipy import special
from scipy import stats
import os
import subprocess
import pyfits
import presto
import C_Funct
from Parameters import *
import Paths as PATH
def sift_pulses(pulses, events, idL, sap, beam):
arff_basename = '{}/thresholds_{}_{}'.format(PATH.TMP_FOLDER, sap, beam)
filters(pulses, events, arff_basename+'.arff')
ML_predict = os.path.join(PATH.TMP_FOLDER, 'ML_predict.txt')
pulses = select_real_pulses(pulses,arff_basename, ML_predict)
return pulses
def select_real_pulses(pulses,basename, out_name):
classifier = os.path.join(PATH.PL_FOLDER, "scores_robLyon/PulsarProcessingScripts-master/ML.jar")
subprocess.call(['java', '-jar', classifier, '-v', '-m{}'.format(PATH.MODEL_FILE), '-p{}'.format(basename+'.arff'), '-o{}'.format(basename+'.positive'), '-a1'])
os.remove(basename+'.arff')
try: pulses_list = np.genfromtxt(basename+'.positive', dtype=int)
except IOError: return pd.DataFrame()
os.remove(basename+'.positive')
pulses = pulses.loc[pulses_list]
if pulses_list.size != pulses.shape[0]:
raise IndexError('Attention: classified file contains pulses not included!')
return pulses
def filters(pulses, events, filename, validation=False, header=True):
values = pd.DataFrame(dtype=np.float16)
idx = 0
events.sort_values('DM',inplace=True)
gb = events.groupby('Pulse',sort=False)
pulses.sort_index(inplace=True)
def mean2(x,y):
return np.sum(x*y)/y.sum()
def kur2(x,y):
std = np.clip(y.std(),1e-5,np.inf)
return np.sum((x-mean2(x,y))**4*y)/y.sum()/std**4 - 3
values[idx] = (gb.apply(lambda x: mean2(x.DM, x.Sigma)))
idx += 1
values[idx] = (gb.apply(lambda x: kur2(x.DM, x.Sigma)))
idx += 1
values[idx] = (gb.apply(lambda x: kur2(x.DM, x.Duration)))
idx += 1
values[idx] = pulses.Sigma
idx += 1
values[idx] = pulses.Duration
idx += 1
if validation: values[idx] = (pulses.Pulsar != 'RFI').astype(np.int)
else: values[idx] = '?%' + np.array(values.index.astype(str))
if header:
features_list = ''
for i in range(idx): features_list += '@attribute Feature{} numeric\n'.format(i)
header = """@relation Training
{}
@attribute class {{0,1}}
@data
""".format(features_list[:-1])
with open(filename, 'w') as f:
f.write(header)
values.to_csv(filename, sep=',', float_format='%10.5f', header=False, index=False, mode='a')
return
def filters_collection():
#def std2(x,y):
#return (np.sum((x-mean2(x,y))**2*y)/y.sum())**.5
#def ske2(x,y):
#std = np.clip(y.std(),1e-5,np.inf)
#return np.abs(np.sum((x-mean2(x,y))**3*y)/y.sum()/std**3)
#values[idx] = pulses.dTime
#idx += 1
#values[idx] = (gb.Duration.max() / pulses.Duration)
#idx += 1
#def flat_SNR_extremes(sigma):
#dim = np.max((1,sigma.shape[0]/6))
#return np.max((np.median(sigma.iloc[:dim]),np.median(sigma.iloc[-dim:]))) / sigma.max()
#values[idx] = (gb.apply(lambda x: flat_SNR_extremes(x.Sigma)))
#idx += 1
#def fit_simm(x,y):
#lim = y.argmax()
#xl = x.loc[:lim]
#if xl.shape[0] < 2: return 10000
#pl = np.polyfit(xl, y.loc[:lim], 1)[0]
#xr = x.loc[lim:]
#if xr.shape[0] < 2: return 10000
#pr = np.polyfit(xr, y.loc[lim:], 1)[0]
#return pl*pr
#values[idx] = (gb.apply(lambda x: fit_simm(x.DM, x.Sigma)))
#idx += 1
#values[idx] = (pulses.dTime / pulses.dDM)
#idx += 1
#values[idx] = (gb.apply(lambda x: std2(x.DM, x.Duration)))
#idx += 1
#values[idx] = (gb.apply(lambda x: std2(x.DM, x.Sigma)))
#idx += 1
#values[idx] = (gb.apply(lambda x: ske2(x.DM, x.Sigma)))
#idx += 1
#values[idx] = pulses.dDM.astype(np.float16)
#idx += 1
#Remove flat duration pulses. Minimum ratio to have weakest pulses with SNR = 8 (from Eq.6.21 of Pulsar Handbook)
#values[idx] = gb.Duration.max() / pulses.Duration - (pulses.Sigma / gb.Sigma.min())**2
#idx += 1
#def extreme_min(ev):
#ev_len = ev.shape[0] / 5
#return np.max((ev[:ev_len].min(), ev[-ev_len:].min()))
#values[idx] = (gb.apply(lambda x: extreme_min(x.Sigma))).astype(np.float16)
#idx += 1
#values[idx] = (gb.Sigma.min() / pulses.Sigma).astype(np.float16)
#idx += 1
#def std_time(x):
#return np.std(x - x.shift(1))
#values[idx] = (gb.apply(lambda x: std_time(x.Time))).astype(np.float16)
#idx += 1
#values[idx] = (pulses.Sigma - gb.Sigma.min()).astype(np.float16)
#idx += 1
#values[idx] = (gb.apply(lambda x: ske2(x.DM, x.Duration)))
#idx += 1
#values[idx] = (gb.apply(lambda x: mean2(x.DM, x.Duration)))
#idx += 1
#def mean(y):
#return y.sum()/y.size
#def std(y):
#return np.clip((np.sum((y-mean(y))**2)/(y.size-1))**.5, 1e-5, np.inf)
#def ske(y):
#return np.sum((y-mean(y))**3)/y.size/(np.sum((y-mean(y))**2)/y.size)**1.5
#def kur(y):
#return np.sum((y-mean(y))**4)/y.size/(np.sum((y-mean(y))**2)/y.size)**2 - 3
return
def multimoment(pulses,idL,inc=12):
pulses.sort_values(['SAP','BEAM'],inplace=True)
last_beam = -1
last_sap = -1
freq = np.linspace(F_MIN,F_MAX,2592)
v = 0
multimoment = np.zeros(pulses.shape[0],dtype=np.float32)
for i,(idx,puls) in enumerate(pulses.iterrows()):
if (puls.BEAM != last_beam) | (puls.SAP != last_sap):
beam = puls.BEAM.astype(int)
sap = puls.SAP.astype(int)
#Open the fits file
if beam==inc: stokes = 'incoherentstokes'
else: stokes = 'stokes'
filename = '{folder}/{idL}_red/{stokes}/SAP{sap}/BEAM{beam}/{idL}_SAP{sap}_BEAM{beam}.fits'.format(folder=PATH.RAW_FOLDER,idL=idL,stokes=stokes,sap=sap,beam=beam)
try: fits = pyfits.open(filename,memmap=True)
except IOError: continue
last_beam = beam
last_sap = sap
header = Utilities.read_header(filename)
MJD = header['STT_IMJD'] + header['STT_SMJD'] / 86400.
v = presto.get_baryv(header['RA'],header['DEC'],MJD,1800.,obs='LF')
if puls.DM < DM_STEP1: sample = puls.Sample
elif puls.DM < DM_STEP2: sample = puls.Sample * 2
else: sample = puls.Sample
sample += np.round(sample*v).astype(int)
duration = np.int(np.round(puls.Duration/RES))
#Load the spectrum
spectrum = Utilities.read_fits(fits,puls.DM.copy(),sample.copy(),duration,0)
#De-dispersion
time = (4149 * puls.DM * (np.power(freq,-2) - F_MAX**-2) / RES).round().astype(np.int)
spectrum = np.sum([spectrum[time+x,np.arange(2592)] for x in range(duration)],axis=0)
I1 = spectrum.size * np.sum(spectrum**2)
I2 = np.sum(spectrum)**2
multimoment[i] = (I1 - I2) / I2
pulses['multimoment'] = multimoment # <= MULTIMOMENT
return
#TO CHECK! Add confirmation observations
beams = {
13: [14, 15, 16, 17, 18, 19],
14: [20, 21, 15, 13, 19, 31],
15: [21, 22, 23, 16, 13, 14],
16: [15, 23, 24, 25, 17, 13],
17: [13, 16, 25, 26, 27, 18],
18: [19, 13, 17, 27, 28, 29],
19: [31, 14, 13, 18, 29, 30],
20: [32, 33, 21, 14, 31, 49],
21: [33, 34, 22, 15, 14, 20],
22: [34, 35, 36, 23, 15, 21],
23: [22, 36, 37, 24, 16, 15],
24: [23, 37, 38, 29, 25, 16],
25: [16, 24, 39, 40, 26, 17],
26: [17, 25, 40, 41, 42, 27],
27: [18, 17, 26, 42, 43, 28],
28: [29, 18, 27, 43, 44, 45],
29: [30, 19, 18, 28, 45, 46],
30: [48, 31, 19, 29, 46, 47],
31: [49, 20, 14, 19, 30, 48],
32: [50, 51, 33, 20, 49, 73],
33: [51, 52, 34, 21, 20, 32],
34: [52, 53, 35, 22, 21, 33],
35: [53, 54, 55, 36, 22, 34],
36: [35, 55, 56, 37, 23, 22],
37: [36, 56, 57, 38, 24, 23],
38: [37, 57, 58, 59, 39, 24],
39: [24, 38, 59, 60, 40, 25],
40: [25, 39, 60, 61, 41, 26],
41: [26, 40, 61, 62, 63, 42],
42: [27, 26, 41, 63, 64, 43],
43: [28, 27, 42, 64, 65, 44],
44: [45, 28, 43, 65, 66, 67],
45: [46, 29, 28, 44, 67, 68],
46: [47, 30, 29, 45, 68, 69],
47: [71, 48, 30, 46, 69, 70],
48: [72, 49, 31, 30, 47, 71],
49: [73, 32, 20, 31, 48, 72],
50: [51, 32, 73],
51: [52, 33, 32, 50],
52: [53, 34, 33, 51],
53: [54, 35, 34, 52],
54: [55, 35, 53],
55: [56, 36, 35, 54],
56: [57, 37, 36, 55],
57: [58, 38, 37, 56],
58: [59, 38, 57],
59: [60, 39, 38, 58],
60: [61, 40, 39, 59],
61: [62, 41, 40, 60],
62: [63, 41, 61],
63: [64, 42, 41, 62],
64: [65, 43, 42, 63],
65: [66, 44, 43, 64],
66: [67, 44, 65],
67: [68, 45, 44, 66],
68: [69, 46, 45, 67],
69: [70, 47, 46, 68],
70: [71, 47, 69],
71: [72, 48, 47, 70],
72: [73, 49, 48, 71],
73: [50, 32, 49, 72]
}
def time_span(pulses):
RFI = pd.DataFrame()
for sap in pulses.SAP.unique():
puls = pulses[pulses.SAP==sap]
try:
puls_time = puls.Time.round(-1).astype(int)
puls_time = puls.groupby(puls_time,sort=False)['N_events'].size()
mean = puls_time.sum()/360.
k = stats.poisson.ppf(0.99,mean)
puls_time = puls_time.index[puls_time>k]
puls_time = puls.loc[puls.Time.round(-1).astype(int).isin(puls_time),['DM','Time']]
except (KeyError, AssertionError): puls_time = pd.DataFrame()
RFI = RFI.append(puls_time)
try:
puls_time = (puls.Time+5).round(-1).astype(int)
puls_time = puls.groupby(puls_time,sort=False)['N_events'].size()
mean = puls_time.sum()/360.
k = stats.poisson.ppf(0.99,mean)
puls_time = puls_time.index[puls_time>k]
puls_time = puls.loc[(puls.Time+5).round(-1).astype(int).isin(puls_time),['DM','Time']]
except (KeyError, AssertionError): puls_time = pd.DataFrame()
RFI = RFI.append(puls_time)
if RFI.empty: return RFI.index
RFI = RFI.drop_duplicates()
RFI.sort_values('Time',inplace=True)
no_rfi = np.zeros(RFI.shape[0],dtype=np.int8)
C_Funct.time_span(RFI.DM.astype(np.float32).values,RFI.Time.astype(np.float32).values,no_rfi)
RFI.sort_index(inplace=True)
return RFI.index[no_rfi==0]
def beam_comparison(pulses, database='SinglePulses.hdf5', inc=12):
conditions_A = '(Time > @tmin) & (Time < @tmax)'
conditions_B = '(SAP == @sap) & (BEAM != @beam) & (BEAM != @inc) & (DM > @DMmin) & (DM < @DMmax) & (Sigma >= @SNRmin)'
conditions_C = 'BEAM != @adjacent_beams'
def comparison(puls, inc, events):
sap = int(puls.SAP)
beam = int(puls.BEAM)
tmin = float(puls.Time - 2. * puls.Duration)
tmax = float(puls.Time + 2. * puls.Duration)
DMmin = float(puls.DM - 0.2)
DMmax = float(puls.DM + 0.2)
SNRmin = puls.Sigma / 2.
try: adjacent_beams = beams[beam]
except KeyError: adjacent_beams = []
if events.query(conditions_A).query(conditions_B).query(conditions_C).groupby('BEAM').count().shape[0] > 4: return 1
else: return 0
events = pd.read_hdf(database, 'events')
values = pulses.apply(lambda x: comparison(x, inc, events), axis=1)
pulses = pulses.loc[values.index[values == 0]]
return pulses
| mit |
raingo/TGIF-Release | code/gifs-filter/c3d-models/train.py | 1 | 1825 | #!/usr/bin/env python
"""
Python source code - replace this with a description of the code and write the code below this text.
"""
import os.path as osp
from sklearn import grid_search
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.cross_validation import StratifiedKFold
def path2uuid(path):
path = osp.basename(path)
fields = path.split('_')
if len(fields) > 1:
return fields[1]
else:
res, _ = osp.splitext(fields[0])
return res
def rfc():
# unused helper; returns a default random forest classifier
return RandomForestClassifier(n_jobs=4)
def load_list(path):
res = []
with open(path) as reader:
for line in reader:
res.append(path2uuid(line.strip()))
return res
def main():
import sys
import numpy as np
from sklearn import cross_validation
from sklearn import svm
import cPickle
data_dir = sys.argv[1]
fet_list = load_list(osp.join(data_dir, 'c3d.list'))
pos_list = load_list(osp.join(data_dir, 'pos.urls'))
features = np.load(osp.join(data_dir, 'c3d.npy'))
fet_set = set(fet_list)
pos_idx = [fet_list.index(i) for i in pos_list if i in fet_set]
y = np.zeros(features.shape[0])
y[pos_idx] = 1
print 'n_pos', np.sum(y), 'n_neg', np.sum(1 - y)
params = {'n_estimators':[2, 4, 5, 6, 8, 10, 30]}
#params = {'n_estimators':[50, 70, 100, 120, 150, 200]}
clf = grid_search.GridSearchCV(RandomForestClassifier(n_estimators = 2, n_jobs = 4), params, scoring = metrics.make_scorer(lambda yt, yp: metrics.f1_score(yt, yp, pos_label = 0)), cv = 5)
clf.fit(features, y)
print clf.best_score_
print clf.best_estimator_
cPickle.dump(clf.best_estimator_, open(osp.join(data_dir, 'c3d-models-rfc.pkl'), 'w'))
if __name__ == "__main__":
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| bsd-3-clause |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/preprocessing/data.py | 6 | 67066 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
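Examples
--------
A minimal illustrative example:
>>> import numpy as np
>>> from sklearn.preprocessing import scale
>>> X = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]])
>>> X_scaled = scale(X)
>>> bool(np.allclose(X_scaled.mean(axis=0), 0.))
True
>>> bool(np.allclose(X_scaled.std(axis=0), 1.))
True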
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_* instead of deprecated *data_min*.
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_* instead of deprecated *data_max*.
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_* instead of deprecated *data_range*.
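Examples
--------
A minimal illustrative example:
>>> import numpy as np
>>> from sklearn.preprocessing import MinMaxScaler
>>> X = np.array([[1., 2.], [3., 6.], [5., 10.]])
>>> scaler = MinMaxScaler()
>>> X_scaled = scaler.fit_transform(X)
>>> float(X_scaled.min()), float(X_scaled.max())
(0.0, 1.0)
>>> [float(m) for m in scaler.data_max_]
[5.0, 10.0]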
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
@property
@deprecated("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
def data_range(self):
return self.data_range_
@property
@deprecated("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
def data_min(self):
return self.data_min_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual feature do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* is recommended instead of deprecated *std_*.
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
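Examples
--------
A minimal illustrative example:
>>> import numpy as np
>>> from sklearn.preprocessing import StandardScaler
>>> X = np.array([[0., 0.], [0., 0.], [1., 1.], [1., 1.]])
>>> scaler = StandardScaler().fit(X)
>>> [float(m) for m in scaler.mean_]
[0.5, 0.5]
>>> X_t = scaler.transform(X)
>>> bool(np.allclose(X_t.std(axis=0), 1.))
True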
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
@property
@deprecated("Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead")
def std_(self):
return self.scale_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
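Examples
--------
A minimal illustrative example:
>>> import numpy as np
>>> from sklearn.preprocessing import MaxAbsScaler
>>> X = np.array([[1., -2.], [2., 4.], [-4., 1.]])
>>> scaler = MaxAbsScaler().fit(X)
>>> [float(s) for s in scaler.scale_]
[4.0, 4.0]
>>> float(np.abs(scaler.transform(X)).max())
1.0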
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
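Examples
--------
A minimal illustrative example; the single outlier barely affects the
centering and scaling statistics:
>>> import numpy as np
>>> from sklearn.preprocessing import RobustScaler
>>> X = np.array([[1.], [2.], [3.], [4.], [100.]])
>>> scaler = RobustScaler().fit(X)
>>> float(scaler.center_[0]), float(scaler.scale_[0])
(3.0, 2.0)
>>> X_t = scaler.transform(X)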
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
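Examples
--------
A minimal illustrative example:
>>> import numpy as np
>>> from sklearn.preprocessing import normalize
>>> X = np.array([[3., 4.], [1., 0.]])
>>> X_l2 = normalize(X, norm='l2')
>>> bool(np.allclose((X_l2 ** 2).sum(axis=1), 1.))
True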
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
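# --- Illustrative usage sketch (not part of the original module) ----------
# Row-wise L2 normalization divides each sample by its Euclidean length,
# e.g. [3, 4] has norm 5 and becomes [0.6, 0.8]; all-zero rows are left
# untouched by _handle_zeros_in_scale.  Assuming the function is importable
# from sklearn.preprocessing:
#
#     import numpy as np
#     from sklearn.preprocessing import normalize
#     normalize(np.array([[3., 4.], [0., 0.]]), norm='l2')
#     # -> array([[0.6, 0.8],
#     #           [0. , 0. ]])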
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
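# --- Illustrative usage sketch (not part of the original module) ----------
# After Normalizer(norm='l2'), the dot product of two rows equals their
# cosine similarity, the property mentioned in the class docstring:
#
#     import numpy as np
#     from sklearn.preprocessing import Normalizer
#     X = np.array([[1., 1., 0.], [2., 2., 0.]])
#     Xn = Normalizer(norm='l2').fit_transform(X)
#     Xn[0].dot(Xn[1])
#     # -> 1.0 (up to floating point; the rows point in the same direction)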
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
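# --- Illustrative usage sketch (not part of the original module) ----------
# Values strictly greater than ``threshold`` become 1, everything else 0:
#
#     import numpy as np
#     from sklearn.preprocessing import binarize
#     binarize(np.array([[0.4, 0.6], [1.5, -2.0]]), threshold=0.5)
#     # -> array([[0., 1.],
#     #           [1., 0.]])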
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
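# --- Illustrative usage sketch (not part of the original module) ----------
# For a linear kernel K = X.dot(X.T), centering K with KernelCenterer is
# equivalent to first centering the columns of X and then rebuilding the
# kernel, which is the StandardScaler(with_std=False) equivalence noted in
# the class docstring:
#
#     import numpy as np
#     from sklearn.preprocessing import KernelCenterer
#     rng = np.random.RandomState(0)
#     X = rng.rand(5, 3)
#     K = X.dot(X.T)
#     Xc = X - X.mean(axis=0)
#     np.allclose(KernelCenterer().fit_transform(K), Xc.dot(Xc.T))
#     # -> True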
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
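# --- Illustrative usage sketch (not part of the original module) ----------
# The helper applies ``transform`` only to the selected columns and stacks
# the untouched columns to the right, e.g. doubling column 0 of a
# two-column array:
#
#     import numpy as np
#     _transform_selected(np.array([[1., 10.], [2., 20.]]),
#                         lambda A: 2 * A, selected=[0])
#     # -> array([[ 2., 10.],
#     #           [ 4., 20.]])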
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if a unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
# i.e lesser than n_values_ using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
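# --- Illustrative note (not part of the original module) ------------------
# In the doctest above feature_indices_ == [0, 2, 5, 9]: feature i occupies
# output columns feature_indices_[i] .. feature_indices_[i + 1] - 1, so a raw
# value v of feature i sets column feature_indices_[i] + v.  For the row
# [0, 1, 1] that means columns 0, 3 and 6, which matches the transform()
# output shown in the class docstring.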
| gpl-2.0 |
kubeflow/pipelines | components/keras/Train_classifier/from_CSV/component.py | 1 | 2835 | from typing import NamedTuple
from kfp.components import create_component_from_func, InputPath, OutputPath
def keras_train_classifier_from_csv(
training_features_path: InputPath('CSV'),
training_labels_path: InputPath('CSV'),
network_json_path: InputPath('KerasModelJson'),
model_path: OutputPath('KerasModelHdf5'),
loss_name: str = 'categorical_crossentropy',
num_classes: int = None,
optimizer: str = 'rmsprop',
optimizer_config: dict = None,
learning_rate: float = 0.01,
num_epochs: int = 100,
batch_size: int = 32,
metrics: list = ['accuracy'],
random_seed: int = 0,
) -> NamedTuple('Outputs', [
('final_loss', float),
('final_metrics', dict),
('metrics_history', dict),
]):
'''Trains classifier model using Keras.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pathlib import Path
import keras
import numpy
import pandas
import tensorflow
tensorflow.random.set_seed(random_seed)
numpy.random.seed(random_seed)
training_features_df = pandas.read_csv(training_features_path)
training_labels_df = pandas.read_csv(training_labels_path)
x_train = training_features_df.to_numpy()
y_train_labels = training_labels_df.to_numpy()
print('Training features shape:', x_train.shape)
    print('Number of training samples:', x_train.shape[0])
# Convert class vectors to binary class matrices.
y_train_one_hot = keras.utils.to_categorical(y_train_labels, num_classes)
model_json_str = Path(network_json_path).read_text()
model = keras.models.model_from_json(model_json_str)
model.add(keras.layers.Activation('softmax'))
# Initializing the optimizer
optimizer_config = optimizer_config or {}
optimizer_config['learning_rate'] = learning_rate
optimizer = keras.optimizers.deserialize({
'class_name': optimizer,
'config': optimizer_config,
})
model.compile(
loss=loss_name,
optimizer=optimizer,
metrics=metrics,
)
history = model.fit(
x_train,
y_train_one_hot,
batch_size=batch_size,
epochs=num_epochs,
shuffle=True
)
model.save(model_path)
metrics_history = {name: [float(value) for value in values] for name, values in history.history.items()}
final_metrics = {name: values[-1] for name, values in metrics_history.items()}
final_loss = final_metrics['loss']
return (final_loss, final_metrics, metrics_history)
if __name__ == '__main__':
keras_train_classifier_from_csv_op = create_component_from_func(
keras_train_classifier_from_csv,
base_image='tensorflow/tensorflow:2.2.0',
packages_to_install=['keras==2.3.1', 'pandas==1.0.5'],
output_component_file='component.yaml',
)
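    # --- Illustrative pipeline sketch (not part of the original component) -
    # A hypothetical way the factory above could be wired into a KFP
    # pipeline.  The upstream tasks and argument names are assumptions (kfp
    # normally derives input names by stripping the ``_path`` suffix from
    # InputPath/OutputPath parameters):
    #
    #     import kfp.dsl as dsl
    #
    #     @dsl.pipeline(name='train-classifier')
    #     def train_pipeline():
    #         train_task = keras_train_classifier_from_csv_op(
    #             training_features=features_task.outputs['output'],
    #             training_labels=labels_task.outputs['output'],
    #             network_json=network_task.outputs['output'],
    #             num_classes=10,
    #             num_epochs=5,
    #         )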
| apache-2.0 |
BorisJeremic/Real-ESSI-Examples | education_examples/_Chapter_Modeling_and_Simulation_Examples_Dynamic_Examples/contact/Dry_Contact/Soft_Contact/Frictional_SDOF/plot.py | 6 | 1237 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 24})
plt.style.use('grayscale')
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=20
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=20
fig = plt.figure(figsize=(10,10))
# Go over each feioutput and plot each one.
thefile = "Frictional_SDOF_freeVibration.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][24,:]
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(times, disp, linewidth=4)
plt.grid()
plt.minorticks_on()
plt.xlabel("Time [s] ")
plt.ylabel("Displacement [m] ")
plt.savefig(outfigname, bbox_inches='tight')
# plt.show()
| cc0-1.0 |
jhamman/mtclim5 | mtclim/mtclim.py | 1 | 29645 | '''
mtclim: Mountain Climate Simulator
'''
# Mountain Climate Simulator, meteorological forcing disaggregator
# Copyright (C) 2015 Joe Hamman
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
import numpy as np
from scipy.optimize import minimize_scalar
from warnings import warn
from statsmodels.tools.eval_measures import rmse
from .physics import svp, calc_pet, atm_pres
from .share import default_parameters, default_options, constants
class MtClim(object):
'''The University of Montana Mountain Climate Simulator'''
def __init__(self, run=False, data=None, parameters=None, options=None):
'''
Initialize MtClim object.
Parameters
----------
data : pandas.DataFrame, optional
Input data: pandas DataFrame with at least `tmax`, `tmin`, and
`prcp`. Timestep freq should be `D`.
run : bool, optional.
            Compute all values immediately (default: False)
parameters : dict-like, optional
Dictionary of parameters to use apart from the default parameters.
options : dict-like, optional
Dictionary of options to use apart from the default options.
'''
# Set model parameters
self.params = default_parameters
if parameters is not None:
self.params.update(parameters)
# Set model options
self.options = default_options
if options is not None:
self.options.update(options)
# Initialize data attributes
if data is not None:
self.data = data
else:
self.data = pd.DataFrame()
if run:
self.init()
self.run()
def init(self):
self.tinystepspday = 86400 / constants['SRADDT']
self.tiny_radfract = np.zeros(shape=(366, self.tinystepspday),
dtype=np.float64)
def run(self):
self.calc_tair()
self.calc_prcp()
self.snowpack()
self.calc_srad_humidity_iterative()
self.calc_longwave()
def resample(self):
return
@property
def data(self):
'''The objects DataFrame'''
return self._data
@data.setter
def data(self, df):
if not isinstance(df, pd.DataFrame):
raise TypeError(
'data must be a Pandas DataFrame instance, got %s' % type(df))
if not all([v in df for v in ['tmax', 'tmin', 'prcp']]):
raise ValueError('data must include tmax, tmin, and prcp')
self._data = df.resample('D').mean()
self.ndays = len(self._data)
def __repr__(self):
r = 'MtClim object\nparameters: {0}\noptions: {1}\ndata: {2}'.format(
self.params, self.options, self.data.head())
return r
def __str__(self):
return 'MtClim object'
def calc_tair(self):
'''
Calculates daily air temperatures.
'''
# calculate elevation difference in meters
dz = (self.params['site_elev'] -
self.params['base_elev'])
# apply lapse rate corrections to tmax and tmin
self.data['s_tmax'] = self.data['tmax'] + \
(dz * self.params['tmax_lr'])
self.data['s_tmin'] = self.data['tmin'] + \
(dz * self.params['tmin_lr'])
# Since tmax lapse rate usually has a larger absolute value than tmin
# lapse rate, it is possible at high elevation sites for these
# corrections to result in tmin > tmax. Check for that occurrence and
# force tmin = corrected tmax - 0.5 deg C.
        self.data['s_tmin'].where(self.data['s_tmin'] <= self.data['s_tmax'],
other=self.data['s_tmax'] - 0.5,
inplace=True)
# derived temperatures
tmean = self.data[['s_tmax', 's_tmin']].mean(axis=1)
self.data['s_tday'] = ((self.data['s_tmax'] - tmean) *
self.params['TDAYCOEF']) + tmean
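    # --- Illustrative note (not part of the original module) --------------
    # Worked example of the lapse-rate correction above, assuming
    # site_elev=2000 m, base_elev=1000 m and tmax_lr=-0.006 degC/m:
    # dz = 1000 m, so a base tmax of 20 degC becomes
    # s_tmax = 20 + 1000 * (-0.006) = 14 degC at the site.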
def calc_prcp(self):
'''
Calculates daily total precipitation
'''
if (self.params['site_isoh'] is not None and
self.params['base_isoh'] is not None):
factor = self.params['site_isoh'] / self.params['base_isoh']
self.data['s_prcp'] = self.data['prcp'] * factor
else:
self.data['s_prcp'] = self.data['prcp']
def snowpack(self):
'''
estimates the accumulation and melt of snow for radiation algorithm
corrections
'''
# initialize SWE array
self.data['s_swe'] = 0.
# first pass
self._simple_snowpack(0.)
# use the first pass to set the initial snowpack conditions for the
# first day of data
start_yday = self.data.index.dayofyear[0]
prev_yday = (self.data.index[0] - pd.Timedelta(1, unit='D')).dayofyear
count = 0
swe_sum = 0.
for i in range(self.ndays):
if (self.data.index.dayofyear[i] == start_yday or
self.data.index.dayofyear[i] == prev_yday):
count += 1
swe_sum += self.data['s_swe'][i]
# Proceed with correction if there are valid days to reinitialize
        # the snowpack estimates. Otherwise use the first-pass estimate.
if count:
snowpack = swe_sum / count
self._simple_snowpack(snowpack)
def _simple_snowpack(self, snowpack):
'''
'''
for i in range(self.ndays):
if (self.data['s_tmin'][i] <= self.params['SNOW_TCRIT']):
snowpack += self.data['s_prcp'][i]
else:
snowpack -= self.params['SNOW_TRATE'] * \
(self.data['s_tmin'][i] - self.params['SNOW_TCRIT'])
snowpack = np.maximum(snowpack, 0.)
self.data['s_swe'][i] = snowpack
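    # --- Illustrative note (not part of the original module) --------------
    # Worked example of the recurrence above, assuming SNOW_TCRIT=-6 degC
    # and SNOW_TRATE=0.042 cm/degC/day: a day with s_tmin=-10 degC and
    # s_prcp=0.5 cm grows the pack by 0.5 cm, while a later day with
    # s_tmin=0 degC melts 0.042 * (0 - (-6)) = 0.252 cm, floored at zero.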
def calc_srad_humidity_iterative(self, tol=0.01, win_type='boxcar'):
'''
Iterative estimation of shortwave radiation and humidity
TODO: simplify
'''
ndays = self.ndays
daylength = np.zeros(366)
window = np.zeros(ndays + 90)
ttmax0 = np.zeros(366)
flat_potrad = np.zeros(366)
slope_potrad = np.zeros(366)
t_fmax = np.zeros(ndays)
self.data['s_tfmax'] = 0.
# calculate diurnal temperature range for transmittance calculations
self.data['tmax'] = np.maximum(self.data['tmax'], self.data['tmin'])
dtr = self.data['tmax'] - self.data['tmin']
# smooth dtr array: After Bristow and Campbell, 1984
# use 30-day antecedent smoothing window
sm_dtr = pd.rolling_window(dtr, window=30, freq='D',
win_type=win_type).fillna(method='bfill')
if self.ndays <= 30:
warn('Timeseries is shorter than rolling mean window, filling '
'missing values with unsmoothed data.')
sm_dtr.fillna(dtr, inplace=True)
# calculate the annual total precip
sum_prcp = self.data['s_prcp'].values.sum()
ann_prcp = (sum_prcp / self.ndays) * 365.25
if (ann_prcp == 0.):
ann_prcp = 1.0
# Generate the effective annual precip, based on a 3-month
# moving-window. Requires some special case handling for the
# beginning of the record and for short records.
# check if there are at least 90 days in this input file, if not,
# use a simple total scaled to effective annual precip
if (ndays < 90):
sum_prcp = self.data['s_prcp'].values.sum()
effann_prcp = (sum_prcp / self.ndays) * 365.25
# if the effective annual precip for this period
# is less than 8 cm, set the effective annual precip to 8 cm
# to reflect an arid condition, while avoiding possible
# division-by-zero errors and very large ratios (PET/Pann)
effann_prcp = np.maximum(effann_prcp, 8.)
parray = effann_prcp
else:
# Check if the yeardays at beginning and the end of this input file
# match up. If so, use parts of the three months at the end
# of the input file to generate effective annual precip for
# the first 3-months. Otherwise, duplicate the first 90 days
# of the record.
start_yday = self.data.index.dayofyear[0]
end_yday = self.data.index.dayofyear[ndays - 1]
            isloop = False
            if (start_yday != 1):
if end_yday == start_yday - 1:
isloop = True
else:
if end_yday == 365 or end_yday == 366:
isloop = True
# fill the first 90 days of window
for i in range(90):
if (isloop):
window[i] = self.data['s_prcp'][ndays - 90 + i]
else:
window[i] = self.data['s_prcp'][i]
# fill the rest of the window array
window[90:] = self.data['s_prcp']
# for each day, calculate the effective annual precip from
# scaled 90-day total
            parray = np.zeros(ndays)
            for i in range(self.ndays):
                sum_prcp = 0.
                for j in range(90):
                    sum_prcp += window[i + j]
                sum_prcp = (sum_prcp / 90.) * 365.25
                # if the effective annual precip for this 90-day period
                # is less than 8 cm, set the effective annual precip to 8 cm
                # to reflect an arid condition, while avoiding possible
                # division-by-zero errors and very large ratios (PET/Pann)
                parray[i] = np.maximum(sum_prcp, 8.)
# start of the main radiation algorithm
# before starting the iterative algorithm between humidity and
# radiation, calculate all the variables that don't depend on
# humidity so they only get done once.
trans1 = self._calc_trans()
# STEP (3) build 366-day array of ttmax0, potential rad, and daylength
# precalculate the transcendentals
# check for (+/-) 90 degrees latitude, throws off daylength calc
lat = np.clip(self.params['site_lat'] * constants['RADPERDEG'],
-np.pi / 2., np.pi / 2.)
coslat = np.cos(lat)
sinlat = np.sin(lat)
cosslp = np.cos(self.params['site_slope'] * constants['RADPERDEG'])
sinslp = np.sin(self.params['site_slope'] * constants['RADPERDEG'])
cosasp = np.cos(self.params['site_aspect'] *
constants['RADPERDEG'])
sinasp = np.sin(self.params['site_aspect'] *
constants['RADPERDEG'])
# cosine of zenith angle for east and west horizons
coszeh = np.cos(np.pi / 2. - (self.params['site_east_horiz'] *
constants['RADPERDEG']))
coszwh = np.cos(np.pi / 2. - (self.params['site_west_horiz'] *
constants['RADPERDEG']))
# sub-daily time and angular increment information
dt = constants['SRADDT'] # set timestep
dh = dt / constants['SECPERRAD'] # calculate hour-angle step
# begin loop through yeardays
for i in range(365):
# calculate cos and sin of declination
decl = constants['MINDECL'] * np.cos((i + constants['DAYSOFF']) *
constants['RADPERDAY'])
cosdecl = np.cos(decl)
sindecl = np.sin(decl)
# do some precalculations for beam-slope geometry (bsg)
bsg1 = -sinslp * sinasp * cosdecl
bsg2 = (-cosasp * sinslp * sinlat + cosslp * coslat) * cosdecl
bsg3 = (cosasp * sinslp * coslat + cosslp * sinlat) * sindecl
# calculate daylength as a function of lat and decl
cosegeom = coslat * cosdecl
sinegeom = sinlat * sindecl
coshss = np.clip(-sinegeom / cosegeom, -1, 1)
            hss = np.arccos(coshss)  # hour angle at sunset (radians)
            # daylength (seconds), capped at one full day
            daylength[i] = np.minimum(2.0 * hss * constants['SECPERRAD'],
                                      86400)
# solar constant as a function of yearday (W/m^2)
sc = 1368.0 + 45.5 * np.sin((2.0 * np.pi * i / 365.25) + 1.7)
# extraterrestrial radiation perpendicular to beam, total over
# the timestep (J)
dir_beam_topa = sc * dt
sum_trans = 0.
sum_flat_potrad = 0.
sum_slope_potrad = 0.
# begin sub-daily hour-angle loop, from -hss to hss
for h in np.arange(-hss, hss, dh):
# precalculate cos and sin of hour angle
cosh = np.cos(h)
sinh = np.sin(h)
# calculate cosine of solar zenith angle
cza = cosegeom * cosh + sinegeom
# calculate cosine of beam-slope angle
cbsa = sinh * bsg1 + cosh * bsg2 + bsg3
# check if sun is above a flat horizon
if (cza > 0.):
# when sun is above the ideal (flat) horizon, do all the
# flat-surface calculations to determine daily total
# transmittance, and save flat-surface potential radiation
# for later calculations of diffuse radiation
# potential radiation for this time period, flat surface,
# top of atmosphere
dir_flat_topa = dir_beam_topa * cza
# determine optical air mass
am = 1.0 / (cza + 0.0000001)
if (am > 2.9):
                        ami = int((np.arccos(cza) / constants['RADPERDEG'])) - 69
if (ami < 0):
ami = 0
if (ami > 20):
ami = 20
am = constants['OPTAM'][ami]
# correct instantaneous transmittance for this optical
# air mass
trans2 = np.power(trans1, am)
# instantaneous transmittance is weighted by potential
# radiation for flat surface at top of atmosphere to get
# daily total transmittance
sum_trans += trans2 * dir_flat_topa
# keep track of total potential radiation on a flat
# surface for ideal horizons
sum_flat_potrad += dir_flat_topa
# keep track of whether this time step contributes to
# component 1 (direct on slope)
if ((h < 0. and cza > coszeh and cbsa > 0.) or
(h >= 0. and cza > coszwh and cbsa > 0.)):
# sun between east and west horizons, and direct on
# slope. this period contributes to component 1
sum_slope_potrad += dir_beam_topa * cbsa
else:
dir_flat_topa = -1
tinystep = np.clip(((12 * 3600 + h * constants['SECPERRAD']) /
constants['SRADDT']),
0, self.tinystepspday - 1)
if dir_flat_topa > 0:
self.tiny_radfract[i, tinystep] = dir_flat_topa
else:
self.tiny_radfract[i, tinystep] = 0
if daylength[i] and sum_flat_potrad > 0:
self.tiny_radfract[i] /= sum_flat_potrad
# calculate maximum daily total transmittance and daylight average
# flux density for a flat surface and the slope
if daylength[i]:
ttmax0[i] = sum_trans / sum_flat_potrad
flat_potrad[i] = sum_flat_potrad / daylength[i]
slope_potrad[i] = sum_slope_potrad / daylength[i]
else:
ttmax0[i] = 0.
flat_potrad[i] = 0.
slope_potrad[i] = 0.
# force yearday 366 = yearday 365
ttmax0[365] = ttmax0[364]
flat_potrad[365] = flat_potrad[364]
slope_potrad[365] = slope_potrad[364]
daylength[365] = daylength[364]
self.tiny_radfract[365] = self.tiny_radfract[364]
# STEP (4) calculate the sky proportion for diffuse radiation
# uses the product of spherical cap defined by average horizon angle
# and the great-circle truncation of a hemisphere. this factor does not
# vary by yearday.
avg_horizon = (self.params['site_east_horiz'] +
self.params['site_west_horiz']) / 2.0
horizon_scalar = 1.0 - np.sin(avg_horizon * constants['RADPERDEG'])
if (self.params['site_slope'] > avg_horizon):
slope_excess = self.params['site_slope'] - avg_horizon
else:
slope_excess = 0.
if (2.0 * avg_horizon > 180.):
slope_scalar = 0.
else:
slope_scalar = np.clip(
1.0 - (slope_excess / (180.0 - 2.0 * avg_horizon)), 0, None)
sky_prop = horizon_scalar * slope_scalar
# b parameter, and t_fmax not varying with Tdew, so these can be
# calculated once, outside the iteration between radiation and humidity
# estimates. Requires storing t_fmax in an array.
# b parameter from 30-day average of DTR
b = self.params['B0'] + self.params['B1'] * \
np.exp(-self.params['B2'] * sm_dtr)
# proportion of daily maximum transmittance
t_fmax = 1.0 - 0.9 * np.exp(-b * np.power(dtr, self.params['C']))
# correct for precipitation if this is a rain day
inds = np.nonzero(self.data['prcp'] >
self.options['SW_PREC_THRESH'])[0]
t_fmax[inds] *= self.params['RAIN_SCALAR']
self.data['s_tfmax'] = t_fmax
# Initial values of vapor pressure, etc
if 'tdew' in self.data:
# Observed Tdew supplied
tdew = self.data['tdew']
else:
# Estimate Tdew
tdew = self.data['s_tmin']
if 's_hum' in self.data:
# Observed vapor pressure supplied
pva = self.data['s_hum']
else:
# convert dewpoint to vapor pressure
pva = svp(tdew)
# Other values needed for srad_humidity calculation
pa = atm_pres(self.params['site_elev'])
yday = self.data.index.dayofyear - 1
self.data['s_dayl'] = daylength[yday]
tdew_save = tdew
pva_save = pva
# Initial estimates of solar radiation, cloud fraction, etc.
tdew, pva, pet = self._compute_srad_humidity_onetime(
tdew, pva, ttmax0, flat_potrad, slope_potrad, sky_prop, daylength,
parray, pa, dtr)
# estimate annual PET
sum_pet = pet.values.sum()
ann_pet = (sum_pet / self.ndays) * 365.25
# Reset humidity terms if no iteration desired
if (('tdew' in self.data) or ('s_hum' in self.data) or
(self.options['VP_ITER'].upper() == 'VP_ITER_ANNUAL' and
ann_pet / ann_prcp >= 2.5)):
tdew = tdew_save[:]
pva = pva_save[:]
# Set up srad-humidity iterations
if (self.options['VP_ITER'].upper() == 'VP_ITER_ALWAYS' or
(self.options['VP_ITER'].upper() == 'VP_ITER_ANNUAL' and
ann_pet / ann_prcp >= 2.5) or
self.options['VP_ITER'].upper() == 'VP_ITER_CONVERGE'):
if (self.options['VP_ITER'].upper() == 'VP_ITER_CONVERGE'):
max_iter = 100
else:
max_iter = 2
else:
max_iter = 1
# srad-humidity iterations
# iter_i = 1
rmse_tdew = tol + 1
# while (rmse_tdew > tol and iter_i < max_iter):
# tdew_save = tdew[:]
#
# tdew = self._compute_srad_humidity_onetime(
# tdew, pva, ttmax0, flat_potrad, slope_potrad, sky_prop,
# daylength, parray, pa, dtr)
#
# rmse_tdew = rmse(tdew, tdew_save)
# iter_i += 1
def f(tdew, *args):
rmse_tdew = rmse(self._compute_srad_humidity_onetime(tdew, *args),
tdew)
return rmse_tdew
res = minimize_scalar(f, tdew, args=(pva, ttmax0, flat_potrad,
slope_potrad, sky_prop, daylength,
parray, pa, dtr),
tol=rmse_tdew, options={'maxiter': max_iter})
tdew = res.x
pva = svp(tdew)
# save humidity in output data structure
if 's_hum' not in self.data:
self.data['s_hum'] = pva
# output humidity as vapor pressure deficit (Pa)
# calculate saturated VP at tday
pvs = svp(self.data['s_tday'])
vpd = pvs - pva
self.data['s_vpd'] = np.maximum(vpd, 0.)
def _compute_srad_humidity_onetime(self, tdew, pva, ttmax0, flat_potrad,
slope_potrad, sky_prop, daylength,
parray, pa, dtr):
'''
Initial estimates of solar radiation, cloud fraction, etc.
Parameters
----------
tdew : pandas.Series
description
pva : pandas.Series
description
ttmax0 : pandas.Series
description
flat_potrad : pandas.Series
description
slope_potrad : pandas.Series
description
sky_prop : pandas.Series
description
daylength : pandas.Series
description
parray : pandas.Series
description
pa : pandas.Series
description
dtr : pandas.Series
description
Returns
----------
tdew : pandas.Series
description
pva : pandas.Series
description
pet : pandas.Series
description
'''
yday = self.data.index.dayofyear - 1
# Compute SW radiation
t_tmax = ttmax0[yday] + (self.params['ABASE'] * pva)
# this is mainly for the case of observed VP supplied, for
# which t_tmax sometimes ends up being negative (when potential
# radiation is low and VP is high)
        t_tmax = np.maximum(t_tmax, 0.0001)
self.data['s_ttmax'] = t_tmax
# final daily total transmittance
t_final = t_tmax * self.data['s_tfmax']
# estimate fraction of radiation that is diffuse, on an
# instantaneous basis, from relationship with daily total
# transmittance in Jones (Plants and Microclimate, 1992)
# Fig 2.8, p. 25, and Gates (Biophysical Ecology, 1980)
# Fig 6.14, p. 122.
pdif = np.clip(-1.25 * t_final + 1.25, 0., 1.)
# estimate fraction of radiation that is direct, on an
# instantaneous basis
pdir = 1.0 - pdif
# the daily total radiation is estimated as the sum of the
# following two components:
# 1. The direct radiation arriving during the part of
# the day when there is direct beam on the slope.
# 2. The diffuse radiation arriving over the entire daylength
# (when sun is above ideal horizon).
# component 1
srad1 = slope_potrad[yday] * t_final * pdir
# component 2 (diffuse)
# includes the effect of surface albedo in raising the diffuse
# radiation for obstructed horizons
srad2 = (flat_potrad[yday] * t_final * pdif) * \
(sky_prop + self.params['DIF_ALB'] * (1.0 - sky_prop))
# snow pack influence on radiation
sc = np.zeros_like(self.data['s_swe'])
if (self.options['MTCLIM_SWE_CORR']):
            inds = np.nonzero((self.data['s_swe'] > 0.) *
                              (daylength[yday] > 0.))
# convert to W/m2 and check for zero daylength
# snow correction in J/m2/day
sc[inds] = (1.32 + 0.096 * self.data['s_swe'][inds]) *\
1.0e6 / daylength[yday][inds]
# set a maximum correction of 100 W/m2
sc = np.maximum(sc, 100.) # JJH - this is fishy
# this could also be sc[inds] = np.maximum(sc[inds], 100.) optimize
# save daily radiation
# save cloud transmittance when rad is an input
if 's_swrad' in self.data:
potrad = (srad1 + srad2 + sc) * daylength[yday] / t_final / 86400
self.data['s_tfmax'] = 1.0
inds = np.nonzero((potrad > 0.) * (self.data['s_swrad'] > 0.) *
(daylength[yday] > 0))[0]
# both of these are 24hr mean rad. here
self.data['s_tfmax'][inds] = (self.data['s_swrad'][inds] /
(potrad[inds] * t_tmax[inds]))
            self.data['s_tfmax'] = np.minimum(self.data['s_tfmax'], 1.)
else:
self.data['s_swrad'] = srad1 + srad2 + sc
if (self.options['LW_CLOUD'].upper() == 'LW_CLOUD_DEARDORFF'):
self.data['s_tskc'] = (1. - self.data['s_tfmax'])
else:
self.data['s_tskc'] = np.sqrt((1. - self.data['s_tfmax']) / 0.65)
self.data['s_fdir'] = pdir
# Compute PET using SW radiation estimate, and update Tdew, pva **
tmink = self.data['s_tmin'] + constants['KELVIN']
pet = calc_pet(self.data['s_swrad'], self.data['s_tday'], pa,
self.data['s_dayl'])
# calculate ratio (PET/effann_prcp) and correct the dewpoint
ratio = pet / parray
self.data['s_ppratio'] = ratio * 365.25
tdewk = tmink * (-0.127 + 1.121 *
(1.003 - 1.444 * ratio + 12.312 *
np.power(ratio, 2) - 32.766 * np.power(ratio, 3)) +
0.0006 * dtr)
tdew = tdewk - constants['KELVIN']
return tdew
def calc_longwave(self):
'''This routine estimates long wave radiation based on the fractional
cloud cover (self.data['s_tskc']), the current air temperature (C), and
the atmospheric vapor pressure (Pa).
'''
# See Bras, R. F. , "Hydrology, an introduction to hydrologic science",
# Addison-Wesley, 1990, p. 42-45
# convert to Kelvin
air_temp = self.data['s_tday'] + constants['KELVIN']
# convert to mbar
        vp = self.data['s_hum'] / 100
if (self.options['LW_TYPE'].upper() == 'TVA'):
# TVA (1972) - see Bras 2.35
emissivity_clear = 0.740 + 0.0049 * vp
elif (self.options['LW_TYPE'].upper() == 'ANDERSON'):
# Anderson (1964)
emissivity_clear = 0.68 + 0.036 * np.power(vp, 0.5)
elif (self.options['LW_TYPE'].upper() == 'BRUTSAERT'):
# Brutsaert (1975)
x = vp / air_temp
emissivity_clear = 1.24 * np.power(x, 0.14285714)
elif (self.options['LW_TYPE'].upper() == 'SATTERLUND'):
# Satterlund (1979)
emissivity_clear = 1.08 * \
(1 - np.exp(-1 * np.power(vp, (air_temp / 2016))))
elif (self.options['LW_TYPE'].upper() == 'IDSO'):
# Idso (1981)
emissivity_clear = 0.7 + 5.95e-5 * vp * np.exp(1500 / air_temp)
elif (self.options['LW_TYPE'].upper() == 'PRATA'):
# Prata (1996)
x = 46.5 * vp / air_temp
emissivity_clear = 1 - (1 + x) * \
np.exp(-1 * np.power((1.2 + 3 * x), 0.5))
else:
raise ValueError('Unknown LW_TYPE {0}'.format(
self.options['LW_TYPE']))
tskc = self.data['s_tskc']
if (self.options['LW_CLOUD'].upper() == 'LW_CLOUD_DEARDORFF'):
# Assume emissivity of clouds is 1.0, and that total emissivity is
# weighted average of cloud emission plus clear-sky emission,
# weighted by fraction of sky occupied by each
# (method of Deardorff, 1978)
emissivity = tskc * 1.0 + (1 - tskc) * emissivity_clear
else:
# see Bras 2.43
emissivity = (1.0 + (0.17 * tskc * tskc)) * emissivity_clear
self.data['s_lwrad'] = (emissivity * constants['STEFAN_B'] *
np.power(air_temp, 4))
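    # --- Illustrative note (not part of the original module) --------------
    # Worked example of the TVA clear-sky branch above, assuming a vapor
    # pressure of 10 mbar and an air temperature of 293.15 K:
    # emissivity_clear = 0.740 + 0.0049 * 10 = 0.789, and with tskc = 0 the
    # emitted longwave is 0.789 * STEFAN_B * 293.15**4, roughly 330 W/m2 for
    # STEFAN_B = 5.67e-8 W m-2 K-4.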
def _calc_trans(self):
# STEP (1) calculate pressure ratio (site/reference) = f(elevation)
pratio = np.power((1.0 - (constants['LR_STD'] *
self.params['site_elev']) /
constants['T_STD']),
(constants['G_STD'] / (constants['LR_STD'] *
(constants['R'] / constants['MA']))))
# STEP (2) correct initial transmittance for elevation
return np.power(self.params['TBASE'], pratio)
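# --- Illustrative usage sketch (not part of the original module) ----------
# A minimal driver, assuming a daily DataFrame with tmax/tmin (degC) and
# prcp (cm), that the default parameters carry the remaining site metadata,
# and that the package exposes MtClim at the top level:
#
#     import pandas as pd
#     from mtclim import MtClim
#
#     forcings = pd.read_csv('daily_forcings.csv', index_col=0,
#                            parse_dates=True)  # hypothetical input file
#     m = MtClim(data=forcings,
#                parameters={'site_elev': 2000., 'base_elev': 1000.,
#                            'site_lat': 48.3})
#     m.init()
#     m.run()
#     m.data[['s_tday', 's_swrad', 's_lwrad', 's_vpd']].head()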
| gpl-3.0 |
klauer/hkl | Documentation/sphinx/source/pyplots/trajectory_full.py | 2 | 2035 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import numpy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from gi.repository import GLib
from gi.repository import Hkl
sample = Hkl.Sample.new("toto")
lattice = Hkl.Lattice.new(1.54, 1.54, 1.54,
math.radians(90.0),
math.radians(90.0),
math.radians(90.))
sample.lattice_set(lattice)
detector = Hkl.Detector.factory_new(Hkl.DetectorType(0))
factory = Hkl.factories()['K6C']
geometry = factory.create_new_geometry()
axis_names = geometry.axis_names_get()
geometry.axis_values_set([0., 120, 0., -90., 0., 60.],
Hkl.UnitEnum.USER)
engines = factory.create_new_engine_list()
engines.init(geometry, detector, sample)
n = 10
h = numpy.linspace(0, 0, n + 1)
k = numpy.linspace(0, 1, n + 1)
l = numpy.linspace(1, 1, n + 1)
# get the hkl engine
hkl = engines.engine_get_by_name("hkl")
# set the hkl engine and get the results
trajectories = []
for hh, kk, ll in zip(h, k, l):
try:
solutions = hkl.pseudo_axis_values_set([hh, kk, ll],
Hkl.UnitEnum.USER)
first_solution = solutions.items()[0]
for i, item in enumerate(solutions.items()):
try:
trajectories[i]
except IndexError:
trajectories.append([])
values = item.geometry_get().axis_values_get(Hkl.UnitEnum.USER)
# print values, item.geometry.distance(geometry)
trajectories[i].append(values)
engines.select_solution(first_solution)
# print
    except GLib.GError:
pass
for i, (trajectory, title) in enumerate(zip(trajectories[1:],
["2nd", "3rd", "4th"])):
ax = plt.subplot(1, 3, i + 1)
plt.title(title)
plt.plot(trajectory, 'o-')
plt.ylim(-180, 180)
if i != 0:
for tl in ax.get_yticklabels():
tl.set_visible(False)
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/ipykernel/inprocess/tests/test_kernel.py | 8 | 2417 | # Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import sys
import unittest
from ipykernel.inprocess.blocking import BlockingInProcessKernelClient
from ipykernel.inprocess.manager import InProcessKernelManager
from ipykernel.inprocess.ipkernel import InProcessKernel
from ipykernel.tests.utils import assemble_output
from IPython.testing.decorators import skipif_not_matplotlib
from IPython.utils.io import capture_output
from ipython_genutils import py3compat
if py3compat.PY3:
from io import StringIO
else:
from StringIO import StringIO
class InProcessKernelTestCase(unittest.TestCase):
def setUp(self):
self.km = InProcessKernelManager()
self.km.start_kernel()
self.kc = self.km.client()
self.kc.start_channels()
self.kc.wait_for_ready()
@skipif_not_matplotlib
def test_pylab(self):
"""Does %pylab work in the in-process kernel?"""
kc = self.kc
kc.execute('%pylab')
out, err = assemble_output(kc.iopub_channel)
self.assertIn('matplotlib', out)
def test_raw_input(self):
""" Does the in-process kernel handle raw_input correctly?
"""
io = StringIO('foobar\n')
sys_stdin = sys.stdin
sys.stdin = io
try:
if py3compat.PY3:
self.kc.execute('x = input()')
else:
self.kc.execute('x = raw_input()')
finally:
sys.stdin = sys_stdin
self.assertEqual(self.km.kernel.shell.user_ns.get('x'), 'foobar')
def test_stdout(self):
""" Does the in-process kernel correctly capture IO?
"""
kernel = InProcessKernel()
with capture_output() as io:
kernel.shell.run_cell('print("foo")')
self.assertEqual(io.stdout, 'foo\n')
kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session)
kernel.frontends.append(kc)
kc.execute('print("bar")')
out, err = assemble_output(kc.iopub_channel)
self.assertEqual(out, 'bar\n')
def test_getpass_stream(self):
"Tests that kernel getpass accept the stream parameter"
kernel = InProcessKernel()
kernel._allow_stdin = True
kernel._input_request = lambda *args, **kwargs : None
kernel.getpass(stream='non empty')
| gpl-3.0 |
aminert/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
wanggang3333/scikit-learn | sklearn/cluster/tests/test_k_means.py | 132 | 25860 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    # check that an error is raised if the precompute_distances flag is not
    # supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
    # Check that KMeans works well even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
    # Check that k_means with a bad initialization does not yield a singleton
    # Starting with bad centers that are quickly ignored should not
    # result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers, which in turn makes the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
    # predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
    # predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
| bsd-3-clause |
mjudsp/Tsallis | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 157 | 2409 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
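    # A minimal sketch of one possible way to complete the TASKs above; the
    # vectorizer settings, classifier and parameter grid are illustrative
    # choices, not the canonical solution.
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    # grid search over unigrams vs. unigrams + bigrams
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    # cross-validated scores for each explored parameter set
    for params, mean_score, std_score in zip(
            grid_search.cv_results_['params'],
            grid_search.cv_results_['mean_test_score'],
            grid_search.cv_results_['std_test_score']):
        print("%0.3f (+/-%0.03f) for %r" % (mean_score, std_score * 2, params))
    # predictions on the held-out test set
    y_predicted = grid_search.predict(docs_test)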
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tools/tests/test_merge_ordered.py | 7 | 3401 | import nose
import pandas as pd
from pandas import DataFrame, merge_ordered
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal
from numpy import nan
class TestOrderedMerge(tm.TestCase):
def setUp(self):
self.left = DataFrame({'key': ['a', 'c', 'e'],
'lvalue': [1, 2., 3]})
self.right = DataFrame({'key': ['b', 'c', 'd', 'f'],
'rvalue': [1, 2, 3., 4]})
def test_deprecation(self):
with tm.assert_produces_warning(FutureWarning):
pd.ordered_merge(self.left, self.right, on='key')
# GH #813
def test_basic(self):
result = merge_ordered(self.left, self.right, on='key')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
'lvalue': [1, nan, 2, nan, 3, nan],
'rvalue': [nan, 1, 2, 3, nan, 4]})
assert_frame_equal(result, expected)
def test_ffill(self):
result = merge_ordered(
self.left, self.right, on='key', fill_method='ffill')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
'lvalue': [1., 1, 2, 2, 3, 3.],
'rvalue': [nan, 1, 2, 3, 3, 4]})
assert_frame_equal(result, expected)
def test_multigroup(self):
left = pd.concat([self.left, self.left], ignore_index=True)
# right = concat([self.right, self.right], ignore_index=True)
left['group'] = ['a'] * 3 + ['b'] * 3
# right['group'] = ['a'] * 4 + ['b'] * 4
result = merge_ordered(left, self.right, on='key', left_by='group',
fill_method='ffill')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'] * 2,
'lvalue': [1., 1, 2, 2, 3, 3.] * 2,
'rvalue': [nan, 1, 2, 3, 3, 4] * 2})
expected['group'] = ['a'] * 6 + ['b'] * 6
assert_frame_equal(result, expected.ix[:, result.columns])
result2 = merge_ordered(self.right, left, on='key', right_by='group',
fill_method='ffill')
assert_frame_equal(result, result2.ix[:, result.columns])
result = merge_ordered(left, self.right, on='key', left_by='group')
self.assertTrue(result['group'].notnull().all())
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.left)
result = nad.merge(self.right, on='key')
tm.assertIsInstance(result, NotADataFrame)
def test_empty_sequence_concat(self):
# GH 9157
empty_pat = "[Nn]o objects"
none_pat = "objects.*None"
test_cases = [
((), empty_pat),
([], empty_pat),
({}, empty_pat),
([None], none_pat),
([None, None], none_pat)
]
for df_seq, pattern in test_cases:
tm.assertRaisesRegexp(ValueError, pattern, pd.concat, df_seq)
pd.concat([pd.DataFrame()])
pd.concat([None, pd.DataFrame()])
pd.concat([pd.DataFrame(), None])
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
tjlaboss/openmc | docs/source/conf.py | 2 | 7780 | # -*- coding: utf-8 -*-
#
# metasci documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 7 22:29:49 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Determine if we're on Read the Docs server
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On Read the Docs, we need to mock a few third-party modules so we don't get
# ImportErrors when building documentation
from unittest.mock import MagicMock
MOCK_MODULES = [
'openmoc', 'openmc.data.reconstruct',
]
sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinxcontrib.katex',
'sphinx_numfig',
'nbsphinx'
]
if not on_rtd:
extensions.append('sphinxcontrib.rsvgconverter')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'OpenMC'
copyright = '2011-2021, Massachusetts Institute of Technology and OpenMC contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.13"
# The full version, including alpha/beta/rc tags.
release = "0.13.0-dev"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
#pygments_style = 'friendly'
#pygments_style = 'bw'
#pygments_style = 'fruity'
#pygments_style = 'manni'
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_logo = '_images/openmc_logo.png'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "OpenMC Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
app.add_css_file('theme_overrides.css')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'openmcdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'openmc.tex', 'OpenMC Documentation',
'OpenMC contributors', 'manual'),
]
latex_elements = {
'preamble': r"""
\usepackage{enumitem}
\usepackage{amsfonts}
\usepackage{amsmath}
\setlistdepth{99}
\usepackage{tikz}
\usetikzlibrary{shapes,snakes,shadows,arrows,calc,decorations.markings,patterns,fit,matrix,spy}
\usepackage{fixltx2e}
\hypersetup{bookmarksdepth=3}
\setcounter{tocdepth}{2}
\numberwithin{equation}{section}
\DeclareUnicodeCharacter{03B1}{$\alpha$}
""",
'printindex': r""
}
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
#Autodocumentation Flags
#autodoc_member_order = "groupwise"
#autoclass_content = "both"
autosummary_generate = True
napoleon_use_ivar = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'matplotlib': ('https://matplotlib.org/', None)
}
| mit |
meduz/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
severinson/Coded-Shuffling | plot.py | 2 | 17778 | '''Plotting tools
'''
import model
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.special import comb as nchoosek
def get_parameters_size():
'''Get a list of parameters for the size plot.'''
rows_per_server = 2000
rows_per_partition = 10
code_rate = 2/3
muq = 2
num_columns = int(1e4)
parameters = list()
num_servers = [5, 8, 20, 50, 80, 125, 200, 500, 2000]
for servers in num_servers:
par = model.SystemParameters.fixed_complexity_parameters(
rows_per_server=rows_per_server,
rows_per_partition=rows_per_partition,
min_num_servers=servers,
code_rate=code_rate,
muq=muq,
num_columns=num_columns
)
parameters.append(par)
return parameters
def get_parameters_size_2():
'''Get a list of parameters for the size plot.'''
rows_per_server = 2000
rows_per_partition = 10
code_rate = 2/3
muq = 2
num_columns = None
num_outputs_factor = 1000
# num_outputs_factor = 10
parameters = list()
num_servers = [5, 8, 20, 50, 80, 125, 200, 500, 2000]
for servers in num_servers:
par = model.SystemParameters.fixed_complexity_parameters(
rows_per_server=rows_per_server,
rows_per_partition=rows_per_partition,
min_num_servers=servers,
code_rate=code_rate,
muq=muq,
num_columns=num_columns,
num_outputs_factor=num_outputs_factor
)
parameters.append(par)
return parameters
def get_parameters_size_3():
'''Get a list of parameters for the size plot.'''
rows_per_server = 2000
rows_per_partition = 100
code_rate = 2/3
muq = 2
num_columns = None
# num_outputs_factor = 1000
num_outputs_factor = 10
parameters = list()
num_servers = [5, 8, 20, 50, 80, 125, 200, 500, 2000]
for servers in num_servers:
par = model.SystemParameters.fixed_complexity_parameters(
rows_per_server=rows_per_server,
rows_per_partition=rows_per_partition,
min_num_servers=servers,
code_rate=code_rate,
muq=muq,
num_columns=num_columns,
num_outputs_factor=num_outputs_factor
)
parameters.append(par)
return parameters
def get_parameters_size_4():
'''Get a list of parameters for the size plot.'''
rows_per_server = 2000
rows_per_partition = 100
code_rate = 2/3
muq = 2
num_columns = None
num_outputs_factor = 1000
parameters = list()
num_servers = [5, 8, 20, 50, 80, 125, 200, 500, 2000]
for servers in num_servers:
par = model.SystemParameters.fixed_complexity_parameters(
rows_per_server=rows_per_server,
min_num_servers=servers,
code_rate=code_rate,
muq=muq,
num_columns=num_columns,
num_outputs_factor=num_outputs_factor
)
parameters.append(par)
return parameters
def get_parameters_tradeoff():
'''Get a list of parameters for the load-vs-delay plot.'''
num_outputs = 840
num_servers = 21
server_storage = 1/2
parameters = list()
num_source_rows = 352716
for q in range(8, num_servers+1):
num_coded_rows = num_source_rows * num_servers / q
num_batches = nchoosek(num_servers, int(server_storage*q))
rows_per_batch = int(num_coded_rows / num_batches)
try:
par = model.SystemParameters(
rows_per_batch=1,
num_servers=num_servers,
q=q,
num_outputs=q,
server_storage=server_storage,
num_partitions=1,
)
except ValueError:
continue
parameters.append(par)
return parameters
def get_parameters_N():
'''Get a list of parameters for the N to n ratio plot.'''
rows_per_batch = 100
num_servers = 9
q = 6
num_outputs = q
server_storage = 1/3
num_partitions = 240
num_outputs = 10*q
# num_columns = 20000
parameters = list()
for i in range(1, 11):
# num_outputs = i * q
num_columns = pow(i, 2) * 200
par = model.SystemParameters(
rows_per_batch=rows_per_batch,
num_servers=num_servers,
q=q,
num_outputs=num_outputs,
server_storage=server_storage,
num_partitions=num_partitions,
num_columns=num_columns,
)
parameters.append(par)
return parameters
def get_parameters_deadline():
'''Get a list of parameters for the N to n ratio plot.'''
rows_per_batch = 10
num_servers = 201
q = 134
# num_partitions = [
# 100, 200, 268,
# ]
# num_partitions = rows_per_batch
num_partitions = 8375
# num_columns = 5000
# num_columns = 10000
num_columns = None
# for T in num_partitions:
num_outputs = 1340
server_storage = 2/q
parameters = model.SystemParameters(
rows_per_batch=rows_per_batch,
num_servers=num_servers,
q=q,
num_outputs=num_outputs,
server_storage=server_storage,
num_partitions=num_partitions,
num_columns=num_columns,
)
return parameters
def get_parameters_partitioning():
'''Get a list of parameters for the partitioning plot.'''
rows_per_batch = 250
num_servers = 9
q = 6
num_outputs = q
server_storage = 1/3
num_partitions = [2, 3, 4, 5, 6, 8, 10, 12, 15, 20, 24, 25, 30,
40, 50, 60, 75, 100, 120, 125, 150, 200, 250,
300, 375, 500, 600, 750, 1000, 1500, 3000]
parameters = list()
for partitions in num_partitions:
par = model.SystemParameters(
rows_per_batch=rows_per_batch,
num_servers=num_servers,
q=q,
num_outputs=num_outputs,
server_storage=server_storage,
num_partitions=partitions,
)
parameters.append(par)
return parameters
def get_parameters_partitioning_2():
'''Constant system size, increasing partitions, num_outputs=num_columns'''
rows_per_batch = 250
num_servers = 9
q = 6
num_outputs = 6000
server_storage = 1/3
num_partitions = [2, 3, 4, 5, 6, 8, 10, 12, 15, 20, 24, 25, 30,
40, 50, 60, 75, 100, 120, 125, 150, 200, 250,
300, 375, 500, 600, 750, 1000, 1500, 3000]
parameters = list()
for partitions in num_partitions:
par = model.SystemParameters(
rows_per_batch=rows_per_batch,
num_servers=num_servers,
q=q,
num_outputs=num_outputs,
server_storage=server_storage,
num_partitions=partitions,
)
parameters.append(par)
return parameters
def get_parameters_partitioning_3():
'''Get a list of parameters for the size plot.'''
rows_per_batch = 10
num_servers = 201
q = 134
num_outputs = 1340
server_storage = 2 / q
# num_partitions = [2]
num_partitions = [10, 134, 8375, 13400, 16750, 67000]
# 26800
parameters = list()
for partitions in num_partitions:
par = model.SystemParameters(
rows_per_batch=rows_per_batch,
num_servers=num_servers,
q=q,
num_outputs=num_outputs,
server_storage=server_storage,
num_partitions=partitions,
)
parameters.append(par)
return parameters
def load_delay_plot(results, plot_settings, xdata, xlabel='',
normalize=None, legend='load', ncol=1, loc='best',
ylim_top=None, xlim_top=None,
ylim_bot=None, xlim_bot=None,
vline=None, title=None, show=True):
'''Create a plot with two subplots for load and delay respectively.
Args:
results: List of Dataframes.
plot_settings: List of dicts with plot settings.
xdata: Label of the X axis data ('partitions' or 'servers').
xlabel: X axis label
    normalize: If a SimulatorResult is provided, all plotted results
are normalized by this one.
legend: Place the legend in the load or delay plot by setting this
argument to 'load' or 'delay'.
ncol: number of legend columns.
loc: legend location.
    ylim_top, xlim_top, ylim_bot, xlim_bot: tuples (min, max) used to set axis
limits for y/x for the top and bottom plots. Set to None to use default
axis limits.
vline: plot a vertical line at this value.
show: show the plots if True.
'''
assert isinstance(results, list)
assert isinstance(plot_settings, list)
# assert isinstance(normalize, SimulatorResult) or normalize is None
assert isinstance(show, bool)
# plt.rc('pgf', texsystem='pdflatex')
# plt.rc('text', usetex=True)
# plt.rcParams['text.latex.preamble'] = [r'\usepackage{lmodern}']
# _ = plt.figure(figsize=(10,9))
# _ = plt.figure(figsize=(11,4))
plt.figure()
# Plot load
plt.autoscale(enable=True)
plt.tight_layout()
ax1 = plt.subplot(211)
plt.setp(ax1.get_xticklabels(), visible=False)
for result, plot_setting in zip(results, plot_settings):
markevery = 0.2
if 'markevery' in plot_setting:
markevery = plot_setting['markevery']
plot_result(
result,
plot_setting,
xdata,
'load',
ylabel=r'$L$',
subplot=True,
normalize=normalize,
markevery=markevery,
)
# plot a vertical bar at the partitioning limit
if vline:
plt.axvline(x=vline, color='k', linestyle=':', linewidth=3)
plt.margins(y=0.1)
if legend == 'load':
plt.legend(
numpoints=1,
shadow=True,
labelspacing=0,
loc=loc,
fancybox=False,
borderaxespad=0.1,
ncol=ncol,
)
if title:
plt.title(title)
# Plot delay
ax2 = plt.subplot(212, sharex=ax1)
for result, plot_setting in zip(results, plot_settings):
markevery = 0.2
if 'markevery' in plot_setting:
markevery = plot_setting['markevery']
plot_result(
result,
plot_setting,
xdata,
'overall_delay',
xlabel=xlabel,
ylabel=r'$D$',
subplot=True,
normalize=normalize,
markevery=markevery,
)
if legend == 'delay':
plt.legend(
numpoints=1,
shadow=True,
labelspacing=0,
loc=loc,
fancybox=False,
borderaxespad=0.1,
ncol=ncol,
)
# plot a vertical bar at the partitioning limit
if vline:
plt.axvline(x=vline, color='k', linestyle=':', linewidth=3)
plt.autoscale(enable=True)
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.12)
plt.margins(y=0.1)
# set axis limits
if ylim_top:
ax1.set_ylim(ylim_top)
if xlim_top:
ax1.set_xlim(xlim_top)
if ylim_bot:
ax2.set_ylim(ylim_bot)
if xlim_bot:
ax2.set_xlim(xlim_bot)
if show:
plt.show()
return
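def _example_load_delay_plot_usage():
    # A hypothetical usage sketch of load_delay_plot(); the dataframe columns
    # ('partitions', 'load', 'overall_delay') and the plot-settings keys
    # ('label', 'color', 'marker') follow what plot_result() below expects.
    import pandas as pd
    df = pd.DataFrame({
        'partitions': [10, 100, 1000],
        'load': [1.20, 1.10, 1.05],
        'overall_delay': [3.0, 2.5, 2.2],
    })
    settings = {'label': 'example', 'color': 'b', 'marker': 'o-'}
    load_delay_plot([df], [settings], 'partitions', xlabel='$T$',
                    legend='load', show=True)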
def encode_decode_plot(results, plot_settings, xdata, xlabel='',
normalize=None, legend='encode', ncol=1, loc='best',
ylim_top=None, xlim_top=None,
ylim_mid=None, xlim_mid=None,
ylim_bot=None, xlim_bot=None,
show=True):
    '''Create a plot with three subplots for the encoding, decoding (reduce) and map delay.
    Args:
results: list of Dataframes.
plot_settings: List of dicts with plot settings.
xdata: Label of the X axis data ('partitions' or 'servers').
xlabel: X axis label
    normalize: If a SimulatorResult is provided, all plotted results
are normalized by this one.
legend: Place the legend in the load or delay plot by setting this
argument to 'load' or 'delay'.
    ylim_top, xlim_top, ylim_mid, xlim_mid, ylim_bot, xlim_bot: tuples (min, max)
        used to set axis limits for y/x for the top, middle and bottom plots.
        Set to None to use default axis limits.
show: show the plots if True.
'''
assert isinstance(results, list)
assert isinstance(plot_settings, list)
assert isinstance(show, bool)
# plt.rc('pgf', texsystem='pdflatex')
# plt.rc('text', usetex=True)
# plt.rcParams['text.latex.preamble'] = [r'\usepackage{lmodern}']
# _ = plt.figure(figsize=(10,9))
plt.figure()
# encode delay
plt.autoscale(enable=True)
plt.tight_layout()
ax1 = plt.subplot(311)
plt.setp(ax1.get_xticklabels(), visible=False)
for df, plot_setting in zip(results, plot_settings):
df = df.copy()
if normalize is not None:
df['encode'] /= normalize['overall_delay']
else:
df['encode'] /= df['overall_delay']
plot_result(
df,
plot_setting,
xdata,
'encode',
ylabel='Encoding Delay',
subplot=True,
)
plt.margins(y=0.1)
plt.title("encode / reduce / map")
if legend == 'encode':
plt.legend(
numpoints=1,
shadow=True,
labelspacing=0,
loc=loc,
fancybox=False,
borderaxespad=0.1,
ncol=ncol,
)
# reduce/decode delay
ax2 = plt.subplot(312, sharex=ax1)
for df, plot_setting in zip(results, plot_settings):
df = df.copy()
if normalize is not None:
df['reduce'] /= normalize['overall_delay']
else:
df['reduce'] /= df['overall_delay']
plot_result(
df,
plot_setting,
xdata,
'reduce',
xlabel=xlabel,
ylabel='Decoding Delay',
subplot=True,
)
if legend == 'decode':
plt.legend(
numpoints=1,
shadow=True,
labelspacing=0,
loc=loc,
fancybox=False,
borderaxespad=0.1,
ncol=ncol,
)
# map delay
ax3 = plt.subplot(313, sharex=ax1)
for df, plot_setting in zip(results, plot_settings):
df = df.copy()
if normalize is not None:
df['delay'] /= normalize['overall_delay']
else:
df['delay'] /= df['overall_delay']
plot_result(
df,
plot_setting,
xdata,
'delay',
xlabel=xlabel,
ylabel='Map Delay',
subplot=True,
)
if legend == 'delay':
plt.legend(
numpoints=1,
shadow=True,
labelspacing=0,
loc=loc,
fancybox=False,
borderaxespad=0.1,
ncol=ncol,
)
plt.autoscale(enable=True)
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.2)
plt.margins(y=0.1)
# set axis limits
if ylim_top:
ax1.set_ylim(ylim_top)
if xlim_top:
ax1.set_xlim(xlim_top)
if ylim_mid:
ax2.set_ylim(ylim_mid)
if xlim_mid:
ax2.set_xlim(xlim_mid)
if ylim_bot:
ax3.set_ylim(ylim_bot)
if xlim_bot:
ax3.set_xlim(xlim_bot)
if show:
plt.show()
return
def plot_result(result, plot_settings, xdata, ydata, xlabel='',
ylabel='', subplot=False, normalize=None,
errorbars=False, plot_type='semilogx',
markevery=None):
'''Plot simulated results.
Args:
result: A SimulatorResult.
plot_settings: A dict with plot settings.
xdata: Label of the X axis data ('partitions' or 'servers').
ydata: Label of the Y axis data ('load' or 'delay').
xlabel: X axis label.
ylabel: Y axis label.
    subplot: Set to True if the plot is intended to be a subplot.
This will keep it from creating a new plot window, creating a
legend, and automatically showing the plot.
normalize: Normalize the plotted data by that of these results.
Must be a list of SimulationResults of length equal to results.
errorbars: Plot error bars.
'''
assert isinstance(plot_settings, dict)
assert isinstance(xlabel, str)
assert isinstance(ylabel, str)
assert isinstance(subplot, bool)
if not subplot:
_ = plt.figure()
plt.grid(True, which='both')
plt.autoscale()
label = plot_settings['label']
color = plot_settings['color']
style = color + plot_settings['marker']
xarray = result[xdata]
ymean = result[ydata].copy()
ymin = ymean.copy()
ymax = ymean.copy()
yerr = np.zeros([2, len(ymean)])
yerr[0, :] = ymean - ymin
yerr[1, :] = ymax - ymean
if normalize is not None:
ymean /= normalize[ydata]
if plot_type == 'semilogx':
plt.semilogx(
xarray,
ymean,
style,
label=label,
markevery=markevery,
markerfacecolor='none',
markeredgewidth=1.0,
)
elif plot_type == 'loglog':
plt.loglog(
xarray,
ymean,
style,
label=label,
markevery=markevery,
markerfacecolor='none',
markeredgewidth=1.0,
)
if errorbars:
plt.errorbar(xarray, ymean, yerr=yerr, fmt='none', ecolor=color)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if not subplot:
plt.legend(numpoints=1, loc='best', prop={'weight': 'bold'})
plt.show()
return
| apache-2.0 |
maniteja123/sympy | sympy/physics/quantum/state.py | 58 | 29186 | """Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import u, range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u("\N{MATHEMATICAL LEFT ANGLE BRACKET}")
_rbracket_ucode = u("\N{MATHEMATICAL RIGHT ANGLE BRACKET}")
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u("\N{LIGHT VERTICAL BAR}")
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(self, ops, **options):
""" Returns the eigenstate instance for the passed operators.
This method should be overridden in subclasses. It will handle being
passed either an Operator instance or set of Operator instances. It
should return the corresponding state INSTANCE or simply raise a
NotImplementedError. See cartesian.py for an example.
"""
raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
""" Returns the operators which this state instance is an eigenstate
of.
This method should be overridden in subclasses. It will be called on
state instances and be passed the operator classes that we wish to make
into instances. The state instance will then transform the classes
appropriately, or raise a NotImplementedError if it cannot return
        operator instances. See cartesian.py for examples.
"""
raise NotImplementedError(
"Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from .operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construt the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
slash, bslash, vert = u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}'), \
u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}'), \
u('\N{BOX DRAWINGS LIGHT VERTICAL}')
else:
lbracket, rbracket = self.lbracket, self.rbracket
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in set([_lbracket, _lbracket_ucode]):
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[ ' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in set([_rbracket, _rbracket_ucode]):
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in set([_straight_bracket, _straight_bracket_ucode]):
bracket_args = [vert for i in range(height)]
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
# The extra {} brackets are needed to get matplotlib's latex
        # renderer to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
"""Base class for Kets.
This class defines the dual property and the brackets for printing. This is
an abstract base class and you should not instantiate it directly, instead
use Ket.
"""
lbracket = _straight_bracket
rbracket = _rbracket
lbracket_ucode = _straight_bracket_ucode
rbracket_ucode = _rbracket_ucode
lbracket_latex = r'\left|'
rbracket_latex = r'\right\rangle '
@classmethod
def default_args(self):
return ("psi",)
@classmethod
def dual_class(self):
return BraBase
def __mul__(self, other):
"""KetBase*other"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, BraBase):
return OuterProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*KetBase"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, BraBase):
return InnerProduct(other, self)
else:
return Expr.__rmul__(self, other)
#-------------------------------------------------------------------------
# _eval_* methods
#-------------------------------------------------------------------------
def _eval_innerproduct(self, bra, **hints):
"""Evaluate the inner product betweeen this ket and a bra.
This is called to compute <bra|ket>, where the ket is ``self``.
This method will dispatch to sub-methods having the format::
``def _eval_innerproduct_BraClass(self, **hints):``
Subclasses should define these methods (one for each BraClass) to
teach the ket how to take inner products with bras.
"""
return dispatch_method(self, '_eval_innerproduct', bra, **hints)
def _apply_operator(self, op, **options):
"""Apply an Operator to this Ket.
This method will dispatch to methods having the format::
``def _apply_operator_OperatorName(op, **options):``
Subclasses should define these methods (one for each OperatorName) to
teach the Ket how operators act on it.
Parameters
==========
op : Operator
The Operator that is acting on the Ket.
options : dict
A dict of key/value pairs that control how the operator is applied
to the Ket.
"""
return dispatch_method(self, '_apply_operator', op, **options)
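# Example of the dispatch convention described in KetBase._apply_operator,
# using hypothetical Ket and Operator subclasses (a sketch only):
#
#     class MyKet(Ket):
#         def _apply_operator_MyOperator(self, op, **options):
#             return 2*self  # MyOperator scales this ket by 2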
class BraBase(StateBase):
"""Base class for Bras.
This class defines the dual property and the brackets for printing. This
is an abstract base class and you should not instantiate it directly,
instead use Bra.
"""
lbracket = _lbracket
rbracket = _straight_bracket
lbracket_ucode = _lbracket_ucode
rbracket_ucode = _straight_bracket_ucode
lbracket_latex = r'\left\langle '
rbracket_latex = r'\right|'
@classmethod
def _operators_to_state(self, ops, **options):
state = self.dual_class().operators_to_state(ops, **options)
return state.dual
def _state_to_operators(self, op_classes, **options):
return self.dual._state_to_operators(op_classes, **options)
def _enumerate_state(self, num_states, **options):
dual_states = self.dual._enumerate_state(num_states, **options)
return [x.dual for x in dual_states]
@classmethod
def default_args(self):
return self.dual_class().default_args()
@classmethod
def dual_class(self):
return KetBase
def __mul__(self, other):
"""BraBase*other"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, KetBase):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*BraBase"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, KetBase):
return OuterProduct(other, self)
else:
return Expr.__rmul__(self, other)
def _represent(self, **options):
"""A default represent that uses the Ket's version."""
from sympy.physics.quantum.dagger import Dagger
return Dagger(self.dual._represent(**options))
class State(StateBase):
"""General abstract quantum state used as a base class for Ket and Bra."""
pass
class Ket(State, KetBase):
"""A general time-independent Ket in quantum mechanics.
Inherits from State and KetBase. This class should be used as the base
class for all physical, time-independent Kets in a system. This class
and its subclasses will be the main classes that users will use for
expressing Kets in Dirac notation [1]_.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
    Create a simple Ket and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> k = Ket('psi')
>>> k
|psi>
>>> k.hilbert_space
H
>>> k.is_commutative
False
>>> k.label
(psi,)
    Kets know about their associated bra::
>>> k.dual
<psi|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.Bra'>
Take a linear combination of two kets::
>>> k0 = Ket(0)
>>> k1 = Ket(1)
>>> 2*I*k0 - 4*k1
2*I*|0> - 4*|1>
Compound labels are passed as tuples::
>>> n, m = symbols('n,m')
>>> k = Ket(n,m)
>>> k
|nm>
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Bra
class Bra(State, BraBase):
"""A general time-independent Bra in quantum mechanics.
Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
class and its subclasses will be the main classes that users will use for
expressing Bras in Dirac notation.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
        bra. This will usually be its symbol or its quantum numbers. For
        a time-dependent state, this will include the time.
Examples
========
Create a simple Bra and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> b = Bra('psi')
>>> b
<psi|
>>> b.hilbert_space
H
>>> b.is_commutative
False
    Bras know about their dual Kets::
>>> b.dual
|psi>
>>> b.dual_class()
<class 'sympy.physics.quantum.state.Ket'>
Like Kets, Bras can have compound labels and be manipulated in a similar
manner::
>>> n, m = symbols('n,m')
>>> b = Bra(n,m) - I*Bra(m,n)
>>> b
-I*<mn| + <nm|
Symbols in a Bra can be substituted using ``.subs``::
>>> b.subs(n,m)
<mm| - I*<mm|
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
"""Base class for a general time-dependent quantum state.
This class is used as a base class for any time-dependent state. The main
difference between this class and the time-independent state is that this
class takes a second argument that is the time in addition to the usual
label argument.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For a time-dependent
state, this will include the time as the final argument.
"""
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def default_args(self):
return ("psi", "t")
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def label(self):
"""The label of the state."""
return self.args[:-1]
@property
def time(self):
"""The time of the state."""
return self.args[-1]
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print_time(self, printer, *args):
return printer._print(self.time, *args)
_print_time_repr = _print_time
_print_time_latex = _print_time
def _print_time_pretty(self, printer, *args):
pform = printer._print(self.time, *args)
return pform
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
time = self._print_time(printer, *args)
return '%s;%s' % (label, time)
def _print_label_repr(self, printer, *args):
label = self._print_sequence(self.label, ',', printer, *args)
time = self._print_time_repr(printer, *args)
return '%s,%s' % (label, time)
def _print_contents_pretty(self, printer, *args):
label = self._print_label_pretty(printer, *args)
time = self._print_time_pretty(printer, *args)
return printer._print_seq((label, time), delimiter=';')
def _print_contents_latex(self, printer, *args):
label = self._print_sequence(
self.label, self._label_separator, printer, *args)
time = self._print_time_latex(printer, *args)
return '%s;%s' % (label, time)
class TimeDepKet(TimeDepState, KetBase):
"""General time-dependent Ket in quantum mechanics.
This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
that should be used for Kets that vary with time. Its dual is a
``TimeDepBra``.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For a time-dependent
state, this will include the time as the final argument.
Examples
========
Create a TimeDepKet and look at its attributes::
>>> from sympy.physics.quantum import TimeDepKet
>>> k = TimeDepKet('psi', 't')
>>> k
|psi;t>
>>> k.time
t
>>> k.label
(psi,)
>>> k.hilbert_space
H
TimeDepKets know about their dual bra::
>>> k.dual
<psi;t|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.TimeDepBra'>
"""
@classmethod
def dual_class(self):
return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
"""General time-dependent Bra in quantum mechanics.
This inherits from TimeDepState and BraBase and is the main class that
    should be used for Bras that vary with time. Its dual is a TimeDepKet.
Parameters
==========
args : tuple
        The list of numbers or parameters that uniquely specify the bra. This
        will usually be its symbol or its quantum numbers. For a time-dependent
state, this will include the time as the final argument.
Examples
========
>>> from sympy.physics.quantum import TimeDepBra
>>> from sympy import symbols, I
>>> b = TimeDepBra('psi', 't')
>>> b
<psi;t|
>>> b.time
t
>>> b.label
(psi,)
>>> b.hilbert_space
H
>>> b.dual
|psi;t>
"""
@classmethod
def dual_class(self):
return TimeDepKet
class Wavefunction(Function):
"""Class for representations in continuous bases
This class takes an expression and coordinates in its constructor. It can
be used to easily calculate normalizations and probabilities.
Parameters
==========
expr : Expr
The expression representing the functional form of the w.f.
coords : Symbol or tuple
The coordinates to be integrated over, and their bounds
Examples
========
Particle in a box, specifying bounds in the more primitive way of using
Piecewise:
>>> from sympy import Symbol, Piecewise, pi, N
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = Symbol('x', real=True)
>>> n = 1
>>> L = 1
>>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
>>> f = Wavefunction(g, x)
>>> f.norm
1
>>> f.is_normalized
True
>>> p = f.prob()
>>> p(0)
0
>>> p(L)
0
>>> p(0.5)
2
>>> p(0.85*L)
2*sin(0.85*pi)**2
>>> N(p(0.85*L))
0.412214747707527
Additionally, you can specify the bounds of the function and the indices in
a more compact way:
>>> from sympy import symbols, pi, diff
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> f(L+1)
0
>>> f(L-1)
sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
>>> f(-1)
0
>>> f(0.85)
sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
>>> f(0.85, n=1, L=1)
sqrt(2)*sin(0.85*pi)
>>> f.is_commutative
False
All arguments are automatically sympified, so you can define the variables
as strings rather than symbols:
>>> expr = x**2
>>> f = Wavefunction(expr, 'x')
>>> type(f.variables[0])
<class 'sympy.core.symbol.Symbol'>
Derivatives of Wavefunctions will return Wavefunctions:
>>> diff(f, x)
Wavefunction(2*x, x)
"""
#Any passed tuples for coordinates and their bounds need to be
#converted to Tuples before Function's constructor is called, to
#avoid errors from calling is_Float in the constructor
def __new__(cls, *args, **options):
new_args = [None for i in args]
ct = 0
for arg in args:
if isinstance(arg, tuple):
new_args[ct] = Tuple(*arg)
else:
new_args[ct] = arg
ct += 1
return super(Function, cls).__new__(cls, *new_args, **options)
def __call__(self, *args, **options):
var = self.variables
if len(args) != len(var):
raise NotImplementedError(
"Incorrect number of arguments to function!")
ct = 0
#If the passed value is outside the specified bounds, return 0
for v in var:
lower, upper = self.limits[v]
#Do the comparison to limits only if the passed symbol is actually
#a symbol present in the limits;
#Had problems with a comparison of x > L
if isinstance(args[ct], Expr) and \
not (lower in args[ct].free_symbols
or upper in args[ct].free_symbols):
continue
if (args[ct] < lower) == True or (args[ct] > upper) == True:
return 0
ct += 1
expr = self.expr
#Allows user to make a call like f(2, 4, m=1, n=1)
for symbol in list(expr.free_symbols):
if str(symbol) in options.keys():
val = options[str(symbol)]
expr = expr.subs(symbol, val)
return expr.subs(zip(var, args))
def _eval_derivative(self, symbol):
expr = self.expr
deriv = expr._eval_derivative(symbol)
return Wavefunction(deriv, *self.args[1:])
def _eval_conjugate(self):
return Wavefunction(conjugate(self.expr), *self.args[1:])
def _eval_transpose(self):
return self
@property
def free_symbols(self):
return self.expr.free_symbols
@property
def is_commutative(self):
"""
Override Function's is_commutative so that order is preserved in
represented expressions
"""
return False
@classmethod
def eval(self, *args):
return None
@property
def variables(self):
"""
Return the coordinates which the wavefunction depends on
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x,y = symbols('x,y')
>>> f = Wavefunction(x*y, x, y)
>>> f.variables
(x, y)
>>> g = Wavefunction(x*y, x)
>>> g.variables
(x,)
"""
var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
return tuple(var)
@property
def limits(self):
"""
        Return the limits of the coordinates which the w.f. depends on. If no
limits are specified, defaults to ``(-oo, oo)``.
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, (x, 0, 1))
>>> f.limits
{x: (0, 1)}
>>> f = Wavefunction(x**2, x)
>>> f.limits
{x: (-oo, oo)}
>>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
>>> f.limits
{x: (-oo, oo), y: (-1, 2)}
"""
limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
for g in self._args[1:]]
return dict(zip(self.variables, tuple(limits)))
@property
def expr(self):
"""
Return the expression which is the functional form of the Wavefunction
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, x)
>>> f.expr
x**2
"""
return self._args[0]
@property
def is_normalized(self):
"""
Returns true if the Wavefunction is properly normalized
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.is_normalized
True
"""
return (self.norm == 1.0)
@property
@cacheit
def norm(self):
"""
Return the normalization of the specified functional form.
This function integrates over the coordinates of the Wavefunction, with
the bounds specified.
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
sqrt(2)*sqrt(L)/2
"""
exp = self.expr*conjugate(self.expr)
var = self.variables
limits = self.limits
for v in var:
curr_limits = limits[v]
exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
return sqrt(exp)
def normalize(self):
"""
Return a normalized version of the Wavefunction
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = symbols('x', real=True)
>>> L = symbols('L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.normalize()
Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
"""
const = self.norm
if const == oo:
raise NotImplementedError("The function is not normalizable!")
else:
return Wavefunction((const)**(-1)*self.expr, *self.args[1:])
def prob(self):
"""
Return the absolute magnitude of the w.f., `|\psi(x)|^2`
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', real=True)
>>> n = symbols('n', integer=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.prob()
Wavefunction(sin(pi*n*x/L)**2, x)
"""
return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
| bsd-3-clause |
duststorm/apm_planner | libs/mavlink/share/pyshared/pymavlink/examples/mavgraph.py | 29 | 5951 | #!/usr/bin/env python
'''
graph a MAVLink log file
Andrew Tridgell August 2011
'''
import sys, struct, time, os, datetime
import math, re
import pylab, pytz, matplotlib
from math import *
# allow import from the parent directory, where mavlink.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from mavextra import *
locator = None
formatter = None
def plotit(x, y, fields, colors=[]):
'''plot a set of graphs using date for x axis'''
global locator, formatter
pylab.ion()
fig = pylab.figure(num=1, figsize=(12,6))
ax1 = fig.gca()
ax2 = None
xrange = 0.0
for i in range(0, len(fields)):
if len(x[i]) == 0: continue
if x[i][-1] - x[i][0] > xrange:
xrange = x[i][-1] - x[i][0]
xrange *= 24 * 60 * 60
if formatter is None:
if xrange < 1000:
formatter = matplotlib.dates.DateFormatter('%H:%M:%S')
else:
formatter = matplotlib.dates.DateFormatter('%H:%M')
interval = 1
intervals = [ 1, 2, 5, 10, 15, 30, 60, 120, 240, 300, 600,
900, 1800, 3600, 7200, 5*3600, 10*3600, 24*3600 ]
for interval in intervals:
if xrange / interval < 15:
break
locator = matplotlib.dates.SecondLocator(interval=interval)
ax1.xaxis.set_major_locator(locator)
ax1.xaxis.set_major_formatter(formatter)
empty = True
ax1_labels = []
ax2_labels = []
for i in range(0, len(fields)):
if len(x[i]) == 0:
print("Failed to find any values for field %s" % fields[i])
continue
if i < len(colors):
color = colors[i]
else:
color = 'red'
(tz, tzdst) = time.tzname
if axes[i] == 2:
if ax2 == None:
ax2 = ax1.twinx()
ax = ax2
ax2.xaxis.set_major_locator(locator)
ax2.xaxis.set_major_formatter(formatter)
label = fields[i]
if label.endswith(":2"):
label = label[:-2]
ax2_labels.append(label)
else:
ax1_labels.append(fields[i])
ax = ax1
ax.plot_date(x[i], y[i], color=color, label=fields[i],
linestyle='-', marker='None', tz=None)
pylab.draw()
empty = False
if ax1_labels != []:
ax1.legend(ax1_labels,loc=opts.legend)
if ax2_labels != []:
ax2.legend(ax2_labels,loc=opts.legend2)
if empty:
print("No data to graph")
return
from optparse import OptionParser
parser = OptionParser("mavgraph.py [options] <filename> <fields>")
parser.add_option("--no-timestamps",dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_option("--planner",dest="planner", action='store_true', help="use planner file format")
parser.add_option("--condition",dest="condition", default=None, help="select packets by a condition")
parser.add_option("--labels",dest="labels", default=None, help="comma separated field labels")
parser.add_option("--mav10", action='store_true', default=False, help="Use MAVLink protocol 1.0")
parser.add_option("--legend", default='upper left', help="default legend position")
parser.add_option("--legend2", default='upper right', help="default legend2 position")
(opts, args) = parser.parse_args()
if opts.mav10:
os.environ['MAVLINK10'] = '1'
import mavutil
if len(args) < 2:
print("Usage: mavlogdump.py [options] <LOGFILES...> <fields...>")
sys.exit(1)
filenames = []
fields = []
for f in args:
if os.path.exists(f):
filenames.append(f)
else:
fields.append(f)
msg_types = set()
multiplier = []
field_types = []
colors = [ 'red', 'green', 'blue', 'orange', 'olive', 'black', 'grey' ]
# work out msg types we are interested in
x = []
y = []
axes = []
first_only = []
re_caps = re.compile('[A-Z_]+')
for f in fields:
caps = set(re.findall(re_caps, f))
msg_types = msg_types.union(caps)
field_types.append(caps)
y.append([])
x.append([])
axes.append(1)
first_only.append(False)
def add_data(t, msg, vars):
'''add some data'''
mtype = msg.get_type()
if mtype not in msg_types:
return
for i in range(0, len(fields)):
if mtype not in field_types[i]:
continue
f = fields[i]
if f.endswith(":2"):
axes[i] = 2
f = f[:-2]
if f.endswith(":1"):
first_only[i] = True
f = f[:-2]
v = mavutil.evaluate_expression(f, vars)
if v is None:
continue
y[i].append(v)
x[i].append(t)
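# Illustrative usage sketch (hypothetical log files and fields): fields are
# expressions over message types detected by the capital-letter regex above;
# a ":2" suffix sends a field to the right-hand axis and a ":1" suffix plots
# it for the first log file only, e.g.
#
#     mavgraph.py flight1.tlog flight2.tlog "VFR_HUD.alt" "VFR_HUD.groundspeed:2" "GPS_RAW.fix_type:1"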
def process_file(filename):
'''process one file'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=opts.notimestamps)
vars = {}
while True:
msg = mlog.recv_match(opts.condition)
if msg is None: break
tdays = (msg._timestamp - time.timezone) / (24 * 60 * 60)
tdays += 719163 # pylab wants it since 0001-01-01
add_data(tdays, msg, mlog.messages)
if len(filenames) == 0:
print("No files to process")
sys.exit(1)
if opts.labels is not None:
labels = opts.labels.split(',')
if len(labels) != len(fields)*len(filenames):
print("Number of labels (%u) must match number of fields (%u)" % (
len(labels), len(fields)*len(filenames)))
sys.exit(1)
else:
labels = None
for fi in range(0, len(filenames)):
f = filenames[fi]
process_file(f)
for i in range(0, len(x)):
if first_only[i] and fi != 0:
x[i] = []
y[i] = []
if labels:
lab = labels[fi*len(fields):(fi+1)*len(fields)]
else:
lab = fields[:]
plotit(x, y, lab, colors=colors[fi*len(fields):])
for i in range(0, len(x)):
x[i] = []
y[i] = []
pylab.show()
raw_input('press enter to exit....')
| agpl-3.0 |
davidwaroquiers/pymatgen | pymatgen/analysis/graphs.py | 1 | 110685 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module for graph representations of crystals.
"""
import copy
import logging
import os.path
import subprocess
import warnings
from collections import defaultdict, namedtuple
from itertools import combinations
from operator import itemgetter
import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy as np
from monty.json import MSONable
from monty.os.path import which
from networkx.drawing.nx_agraph import write_dot
from networkx.readwrite import json_graph
from scipy.spatial import KDTree
from scipy.stats import describe
from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure
from pymatgen.core.structure import FunctionalGroups
from pymatgen.util.coord import lattice_points_in_supercell
from pymatgen.vis.structure_vtk import EL_COLORS
try:
import igraph
IGRAPH_AVAILABLE = True
except ImportError:
IGRAPH_AVAILABLE = False
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
__author__ = "Matthew Horton, Evan Spotte-Smith, Samuel Blau"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "August 2017"
ConnectedSite = namedtuple("ConnectedSite", "site, jimage, index, weight, dist")
def _compare(g1, g2, i1, i2):
"""
Helper function called by isomorphic to ensure comparison of node identities.
"""
return g1.vs[i1]["species"] == g2.vs[i2]["species"]
def _igraph_from_nxgraph(graph):
"""
Helper function that converts a networkx graph object into an igraph graph object.
"""
nodes = graph.nodes(data=True)
new_igraph = igraph.Graph()
for node in nodes:
new_igraph.add_vertex(name=str(node[0]), species=node[1]["specie"], coords=node[1]["coords"])
new_igraph.add_edges([(str(edge[0]), str(edge[1])) for edge in graph.edges()])
return new_igraph
def _isomorphic(frag1, frag2):
"""
Internal function to check if two graph objects are isomorphic, using igraph if
    it is available and networkx if it is not.
"""
f1_nodes = frag1.nodes(data=True)
f2_nodes = frag2.nodes(data=True)
if len(f1_nodes) != len(f2_nodes):
return False
    f1_edges = frag1.edges()
    f2_edges = frag2.edges()
    if len(f1_edges) != len(f2_edges):
return False
f1_comp_dict = {}
f2_comp_dict = {}
for node in f1_nodes:
if node[1]["specie"] not in f1_comp_dict:
f1_comp_dict[node[1]["specie"]] = 1
else:
f1_comp_dict[node[1]["specie"]] += 1
for node in f2_nodes:
if node[1]["specie"] not in f2_comp_dict:
f2_comp_dict[node[1]["specie"]] = 1
else:
f2_comp_dict[node[1]["specie"]] += 1
if f1_comp_dict != f2_comp_dict:
return False
if IGRAPH_AVAILABLE:
ifrag1 = _igraph_from_nxgraph(frag1)
ifrag2 = _igraph_from_nxgraph(frag2)
return ifrag1.isomorphic_vf2(ifrag2, node_compat_fn=_compare)
nm = iso.categorical_node_match("specie", "ERROR")
return nx.is_isomorphic(frag1.to_undirected(), frag2.to_undirected(), node_match=nm)
class StructureGraph(MSONable):
"""
This is a class for annotating a Structure with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, structure, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
This class that contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given crystallographic
structure easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
For periodic graphs, class stores information on the graph
edges of what lattice image the edge belongs to.
:param structure: a Structure object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(structure, StructureGraph):
# just make a copy from input
graph_data = structure.as_dict()["graphs"]
self.structure = structure
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if "id" in d:
del d["id"]
if "key" in d:
del d["key"]
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if "to_jimage" in d:
d["to_jimage"] = tuple(d["to_jimage"])
if "from_jimage" in d:
d["from_jimage"] = tuple(d["from_jimage"])
@classmethod
def with_empty_graph(cls, structure, name="bonds", edge_weight_name=None, edge_weight_units=None):
"""
Constructor for StructureGraph, returns a StructureGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Structure).
:param structure (Structure):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (StructureGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError(
"Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless."
)
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(
edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name,
)
graph.add_nodes_from(range(len(structure)))
graph_data = json_graph.adjacency_data(graph)
return cls(structure, graph_data=graph_data)
@staticmethod
def with_edges(structure, edges):
"""
        Constructor for StructureGraph, using pre-existing or pre-defined edges
with optional edge parameters.
        :param structure: Structure object
        :param edges: dict representing the bonds of the structure
            (format: {(from_index, to_index, from_image, to_image): props},
where props is a dictionary of properties, including weight.
Props should be None if no additional properties are to be
specified.
:return: sg, a StructureGraph
"""
sg = StructureGraph.with_empty_graph(structure, name="bonds", edge_weight_name="weight", edge_weight_units="")
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
from_image = edge[2]
to_image = edge[3]
except TypeError:
raise ValueError("Edges must be given as (from_index, to_index," " from_image, to_image) tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = sg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError(
"Edges cannot be added if nodes are not" " present in the graph. Please check your" " indices."
)
sg.add_edge(
from_index,
to_index,
from_jimage=from_image,
to_jimage=to_image,
weight=weight,
edge_properties=props,
)
sg.set_node_attributes()
return sg
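    # Minimal usage sketch (hypothetical structure and values) of the edge
    # dict format accepted above:
    #
    #     edges = {
    #         (0, 1, (0, 0, 0), (0, 0, 0)): {"weight": 2.82},  # bond inside the cell
    #         (0, 1, (0, 0, 0), (1, 0, 0)): None,              # bond across a periodic boundary
    #     }
    #     sg = StructureGraph.with_edges(structure, edges)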
@staticmethod
def with_local_env_strategy(structure, strategy, weights=False):
"""
Constructor for StructureGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param structure: Structure object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:param weights: if True, use weights from local_env class
(consult relevant class for their meaning)
:return:
"""
if not strategy.structures_allowed:
raise ValueError(
"Chosen strategy is not designed for use with structures! " "Please choose another strategy."
)
sg = StructureGraph.with_empty_graph(structure, name="bonds")
for n, neighbors in enumerate(strategy.get_all_nn_info(structure)):
for neighbor in neighbors:
# local_env will always try to add two edges
# for any one bond, one from site u to site v
# and another form site v to site u: this is
# harmless, so warn_duplicates=False
sg.add_edge(
from_index=n,
from_jimage=(0, 0, 0),
to_index=neighbor["site_index"],
to_jimage=neighbor["image"],
weight=neighbor["weight"] if weights else None,
warn_duplicates=False,
)
return sg
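    # Minimal usage sketch, assuming a NearNeighbors subclass such as
    # CrystalNN from pymatgen.analysis.local_env:
    #
    #     from pymatgen.analysis.local_env import CrystalNN
    #     sg = StructureGraph.with_local_env_strategy(structure, CrystalNN(), weights=True)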
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph["name"]
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph["edge_weight_name"]
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph["edge_weight_units"]
def add_edge(
self,
from_index,
to_index,
from_jimage=(0, 0, 0),
to_jimage=None,
weight=None,
warn_duplicates=True,
edge_properties=None,
):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index,
from_jimage can be swapped with to_index, to_jimage.
        However, images will always be shifted so that
from_index < to_index and from_jimage becomes (0, 0, 0).
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param from_jimage (tuple of ints): lattice vector of periodic
image, e.g. (1, 0, 0) for periodic image in +x direction
:param to_jimage (tuple of ints): lattice vector of image
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
to_jimage, from_jimage = from_jimage, to_jimage
# constrain all from_jimages to be (0, 0, 0),
# initial version of this class worked even if
# from_jimage != (0, 0, 0), but making this
# assumption simplifies logic later
if not np.array_equal(from_jimage, (0, 0, 0)):
shift = from_jimage
from_jimage = np.subtract(from_jimage, shift)
to_jimage = np.subtract(to_jimage, shift)
# automatic detection of to_jimage if user doesn't specify
# will try and detect all equivalent images and add multiple
# edges if appropriate
if to_jimage is None:
# assume we want the closest site
warnings.warn("Please specify to_jimage to be unambiguous, " "trying to automatically detect.")
dist, to_jimage = self.structure[from_index].distance_and_image(self.structure[to_index])
if dist == 0:
# this will happen when from_index == to_index,
# typically in primitive single-atom lattices
images = [1, 0, 0], [0, 1, 0], [0, 0, 1]
dists = []
for image in images:
dists.append(
self.structure[from_index].distance_and_image(self.structure[from_index], jimage=image)[0]
)
dist = min(dists)
equiv_sites = self.structure.get_neighbors_in_shell(
self.structure[from_index].coords, dist, dist * 0.01, include_index=True
)
for nnsite in equiv_sites:
to_jimage = np.subtract(nnsite.frac_coords, self.structure[from_index].frac_coords)
to_jimage = np.round(to_jimage).astype(int)
self.add_edge(
from_index=from_index,
from_jimage=(0, 0, 0),
to_jimage=to_jimage,
to_index=nnsite.index,
)
return
# sanitize types
from_jimage, to_jimage = (
tuple(map(int, from_jimage)),
tuple(map(int, to_jimage)),
)
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between a given (site, jimage) pair and another
# (site, jimage) pair
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data:
for key, d in existing_edge_data.items():
if d["to_jimage"] == to_jimage:
if warn_duplicates:
warnings.warn(
"Trying to add an edge that already exists from "
"site {} to site {} in {}.".format(from_index, to_index, to_jimage)
)
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index, to_jimage=to_jimage, weight=weight, **edge_properties)
else:
self.graph.add_edge(from_index, to_index, to_jimage=to_jimage, **edge_properties)
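    # Minimal usage sketch (hypothetical site indices): add a bond inside the
    # cell and one to the periodic image in +x:
    #
    #     sg.add_edge(0, 1, to_jimage=(0, 0, 0), weight=2.5)
    #     sg.add_edge(0, 1, to_jimage=(1, 0, 0), weight=2.5)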
def insert_node(
self,
i,
species,
coords,
coords_are_cartesian=False,
validate_proximity=False,
site_properties=None,
edges=None,
):
"""
        A wrapper around Structure.insert(), which also incorporates the new
        site into the StructureGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param coords_are_cartesian: Whether coordinates are cartesian.
Defaults to False.
        :param validate_proximity: For Structure.insert(); if True (default
False), distance will be checked to ensure that site can be safely
added.
        :param site_properties: Site properties for Structure
        :param edges: List of dicts representing edges to be added to the
            StructureGraph. These edges must include the index of the new site i,
            and all indices used for these edges should reflect the
            StructureGraph AFTER the insertion, NOT before. Each dict should at
least have a "to_index" and "from_index" key, and can also have a
"weight" and a "properties" key.
:return:
"""
self.structure.insert(
i,
species,
coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=site_properties,
)
mapping = {}
for j in range(len(self.structure) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(
edge["from_index"],
edge["to_index"],
from_jimage=(0, 0, 0),
to_jimage=edge["to_jimage"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None),
)
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Gives each node a "specie" and a "coords" attribute, updated with the
current species and coordinates.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.structure[node].specie.symbol
coords[node] = self.structure[node].coords
properties[node] = self.structure[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(
self,
from_index,
to_index,
to_jimage=None,
new_weight=None,
new_edge_properties=None,
):
"""
Alters either the weight or the edge_properties of
an edge in the StructureGraph.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edges = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edges:
raise ValueError(
"Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
)
)
if to_jimage is None:
edge_index = 0
else:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
if new_weight is not None:
self.graph[from_index][to_index][edge_index]["weight"] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False):
"""
Remove an edge from the StructureGraph. If no image is given, this method will fail.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edges = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if to_jimage is None:
raise ValueError("Image must be supplied, to avoid ambiguity.")
if existing_edges:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(from_index, to_index, edge_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
for i, properties in existing_reverse.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(to_index, from_index, edge_index)
else:
raise ValueError(
"Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
)
)
def remove_nodes(self, indices):
"""
        A wrapper for Structure.remove_sites().
        :param indices: list of indices in the current Structure (and graph) to
be removed.
:return:
"""
self.structure.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def substitute_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Structure.substitute to replace an atom in self.structure
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: Care must be taken to ensure that the functional group that is
        substituted will not place atoms too close to each other, or violate the
dimensions of the Lattice.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are two options:
1. Providing an actual Molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
def map_indices(grp):
grp_map = {}
# Get indices now occupied by functional group
# Subtracting 1 because the dummy atom X should not count
atoms = len(grp) - 1
offset = len(self.structure) - atoms
for i in range(atoms):
grp_map[i] = i + offset
return grp_map
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except Exception:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
self.structure.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "to_jimage" in edge_props.keys():
to_jimage = edge_props["to_jimage"]
del edge_props["to_jimage"]
else:
                    # By default, assume that all edges should remain
# inside the initial image
to_jimage = (0, 0, 0)
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(
mapping[u],
mapping[v],
to_jimage=to_jimage,
weight=weight,
edge_properties=edge_props,
)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
for site in mapping.values():
neighbors = strat.get_nn_info(self.structure, site)
for neighbor in neighbors:
self.add_edge(
from_index=site,
from_jimage=(0, 0, 0),
to_index=neighbor["site_index"],
to_jimage=neighbor["image"],
weight=neighbor["weight"],
warn_duplicates=False,
)
def get_connected_sites(self, n, jimage=(0, 0, 0)):
"""
Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Structure
:param jimage: lattice vector of site
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
connected_site_images = set()
out_edges = [(u, v, d, "out") for u, v, d in self.graph.out_edges(n, data=True)]
in_edges = [(u, v, d, "in") for u, v, d in self.graph.in_edges(n, data=True)]
for u, v, d, dir in out_edges + in_edges:
to_jimage = d["to_jimage"]
if dir == "in":
u, v = v, u
to_jimage = np.multiply(-1, to_jimage)
to_jimage = tuple(map(int, np.add(to_jimage, jimage)))
site_d = self.structure[v].as_dict()
site_d["abc"] = np.add(site_d["abc"], to_jimage).tolist()
site = PeriodicSite.from_dict(site_d)
# from_site if jimage arg != (0, 0, 0)
relative_jimage = np.subtract(to_jimage, jimage)
dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage)
weight = d.get("weight", None)
if (v, to_jimage) not in connected_site_images:
connected_site = ConnectedSite(site=site, jimage=to_jimage, index=v, weight=weight, dist=dist)
connected_sites.add(connected_site)
connected_site_images.add((v, to_jimage))
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
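    # Minimal usage sketch: iterate over the ConnectedSite named tuples
    # returned above, closest neighbour first:
    #
    #     for cs in sg.get_connected_sites(0):
    #         print(cs.index, cs.jimage, round(cs.dist, 3), cs.weight)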
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(
self,
filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp",
):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
        # multi-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.structure[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0])
# get contrasting font color
            # magic numbers account for perceived luminance
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff"
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(
n,
fillcolor=color,
fontcolor=fontcolor,
label=label,
fontname="Helvetica-bold",
style="filled",
shape="circle",
)
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
to_image = d["to_jimage"]
# set edge style
d["style"] = "solid"
if to_image != (0, 0, 0):
d["style"] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d["arrowhead"] = "none"
# only add labels for images that are not the origin
if image_labels:
d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d["arrowhead"] = "normal" if d["headlabel"] else "none"
# optionally color edges using node colors
color_u = g.nodes[u]["fillcolor"]
color_v = g.nodes[v]["fillcolor"]
d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get("edge_weight_units", "")
if d.get("weight"):
d["label"] = "{:.2f} {}".format(d["weight"], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
            g = g.subgraph([n for n, deg in g.degree() if deg != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d["to_jimage"]) in diff["self"]:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d["to_jimage"]) in diff["other"]:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({"color_uv": "#00ff00"})
for u, v, k in red_edges:
g.edges[u, v, k].update({"color_uv": "#ff0000"})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename + ".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename + ".dot"]
rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename + ".dot")
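    # Minimal usage sketch (requires the GraphViz binaries on the path, as
    # checked above; the filename is hypothetical):
    #
    #     sg.draw_graph_to_file("bonding.pdf", hide_image_edges=False,
    #                           node_labels=True, algo="neato")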
@property
def types_and_weights_of_connections(self):
"""
Extract a dictionary summarizing the types and weights
of edges in the graph.
:return: A dictionary with keys specifying the
species involved in a connection in alphabetical order
(e.g. string 'Fe-O') and values which are a list of
weights for those connections (e.g. bond lengths).
"""
def get_label(u, v):
u_label = self.structure[u].species_string
v_label = self.structure[v].species_string
return "-".join(sorted((u_label, v_label)))
types = defaultdict(list)
for u, v, d in self.graph.edges(data=True):
label = get_label(u, v)
types[label].append(d["weight"])
return dict(types)
@property
def weight_statistics(self):
"""
Extract a statistical summary of edge weights present in
the graph.
:return: A dict with an 'all_weights' list, 'minimum',
'maximum', 'median', 'mean', 'std_dev'
"""
all_weights = [d.get("weight", None) for u, v, d in self.graph.edges(data=True)]
stats = describe(all_weights, nan_policy="omit")
return {
"all_weights": all_weights,
"min": stats.minmax[0],
"max": stats.minmax[1],
"mean": stats.mean,
"variance": stats.variance,
}
def types_of_coordination_environments(self, anonymous=False):
"""
Extract information on the different co-ordination environments
present in the graph.
:param anonymous: if anonymous, will replace specie names
with A, B, C, etc.
:return: a list of co-ordination environments,
e.g. ['Mo-S(6)', 'S-Mo(3)']
"""
motifs = set()
for idx, site in enumerate(self.structure):
centre_sp = site.species_string
connected_sites = self.get_connected_sites(idx)
connected_species = [connected_site.site.species_string for connected_site in connected_sites]
labels = []
for sp in set(connected_species):
count = connected_species.count(sp)
labels.append((count, sp))
labels = sorted(labels, reverse=True)
if anonymous:
mapping = {centre_sp: "A"}
available_letters = [chr(66 + i) for i in range(25)]
for label in labels:
sp = label[1]
if sp not in mapping:
mapping[sp] = available_letters.pop(0)
centre_sp = "A"
labels = [(label[0], mapping[label[1]]) for label in labels]
labels = ["{}({})".format(label[1], label[0]) for label in labels]
motif = "{}-{}".format(centre_sp, ",".join(labels))
motifs.add(motif)
return sorted(list(motifs))
def as_dict(self):
"""
As in :Class: `pymatgen.core.Structure` except
with using `to_dict_of_dicts` from NetworkX
to store graph information.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"graphs": json_graph.adjacency_data(self.graph),
}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Structure` except
restoring graphs using `from_dict_of_dicts`
from NetworkX to restore graph information.
"""
s = Structure.from_dict(d["structure"])
return cls(s, d["graphs"])
def __mul__(self, scaling_matrix):
"""
Replicates the graph, creating a supercell,
intelligently joining together
edges that lie on periodic boundaries.
In principle, any operations on the expanded
graph could also be done on the original
graph, but a larger graph can be easier to
visualize and reason about.
:param scaling_matrix: same as Structure.__mul__
:return:
"""
# Developer note: a different approach was also trialed, using
# a simple Graph (instead of MultiDiGraph), with node indices
# representing both site index and periodic image. Here, the
# number of nodes != number of sites in the Structure. This
# approach has many benefits, but made it more difficult to
# keep the graph in sync with its corresponding Structure.
# Broadly, it would be easier to multiply the Structure
# *before* generating the StructureGraph, but this isn't
# possible when generating the graph using critic2 from
# charge density.
# Multiplication works by looking for the expected position
# of an image node, and seeing if that node exists in the
# supercell. If it does, the edge is updated. This is more
        # computationally expensive than just keeping track of
        # which new lattice images are present, but should hopefully be
# easier to extend to a general 3x3 scaling matrix.
# code adapted from Structure.__mul__
scale_matrix = np.array(scaling_matrix, np.int16)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
else:
# TODO: test __mul__ with full 3x3 scaling matrices
raise NotImplementedError("Not tested with 3x3 scaling matrices yet.")
new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
new_graphs = []
for v in c_lat:
# create a map of nodes from original graph to its image
mapping = {n: n + len(new_sites) for n in range(len(self.structure))}
for idx, site in enumerate(self.structure):
s = PeriodicSite(
site.species,
site.coords + v,
new_lattice,
properties=site.properties,
coords_are_cartesian=True,
to_unit_cell=False,
)
new_sites.append(s)
new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True))
new_structure = Structure.from_sites(new_sites)
# merge all graphs into one big graph
new_g = nx.MultiDiGraph()
for new_graph in new_graphs:
new_g = nx.union(new_g, new_graph)
edges_to_remove = [] # tuple of (u, v, k)
edges_to_add = [] # tuple of (u, v, attr_dict)
# list of new edges inside supercell
# for duplicate checking
edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True) if d["to_jimage"] == (0, 0, 0)]
new_periodic_images = []
orig_lattice = self.structure.lattice
# use k-d tree to match given position to an
# existing Site in Structure
kd_tree = KDTree(new_structure.cart_coords)
# tolerance in Å for sites to be considered equal
# this could probably be a lot smaller
tol = 0.05
for u, v, k, d in new_g.edges(keys=True, data=True):
to_jimage = d["to_jimage"] # for node v
# reduce unnecessary checking
if to_jimage != (0, 0, 0):
# get index in original site
n_u = u % len(self.structure)
n_v = v % len(self.structure)
# get fractional co-ordinates of where atoms defined
# by edge are expected to be, relative to original
# lattice (keeping original lattice has
# significant benefits)
v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage)
u_frac = self.structure[n_u].frac_coords
# using the position of node u as a reference,
# get relative Cartesian co-ordinates of where
# atoms defined by edge are expected to be
v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac)
u_cart = orig_lattice.get_cartesian_coords(u_frac)
v_rel = np.subtract(v_image_cart, u_cart)
# now retrieve position of node v in
                # new supercell, and get absolute Cartesian
# co-ordinates of where atoms defined by edge
# are expected to be
v_expec = new_structure[u].coords + v_rel
# now search in new structure for these atoms
# query returns (distance, index)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
# check if image sites now present in supercell
# and if so, delete old edge that went through
# periodic boundary
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
# node now inside supercell
new_d["to_jimage"] = (0, 0, 0)
edges_to_remove.append((u, v, k))
# make sure we don't try to add duplicate edges
                    # will remove two edges for every one we add
if {new_u, new_v} not in edges_inside_supercell:
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
edges_inside_supercell.append({new_u, new_v})
edges_to_add.append((new_u, new_v, new_d))
else:
# want to find new_v such that we have
# full periodic boundary conditions
# so that nodes on one side of supercell
# are connected to nodes on opposite side
v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec)
# find new to_jimage
# use np.around to fix issues with finite precision leading to incorrect image
v_expec_image = np.around(v_expec_frac, decimals=3)
v_expec_image = v_expec_image - v_expec_image % 1
v_expec_frac = np.subtract(v_expec_frac, v_expec_image)
v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
new_to_jimage = tuple(map(int, v_expec_image))
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
new_to_jimage = tuple(np.multiply(-1, d["to_jimage"]).astype(int))
new_d["to_jimage"] = new_to_jimage
edges_to_remove.append((u, v, k))
if (new_u, new_v, new_to_jimage) not in new_periodic_images:
edges_to_add.append((new_u, new_v, new_d))
new_periodic_images.append((new_u, new_v, new_to_jimage))
logger.debug("Removing {} edges, adding {} new edges.".format(len(edges_to_remove), len(edges_to_add)))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            new_g.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
new_g.add_edge(u, v, **d)
# return new instance of StructureGraph with supercell
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": new_structure.as_dict(),
"graphs": json_graph.adjacency_data(new_g),
}
sg = StructureGraph.from_dict(d)
return sg
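    # Minimal usage sketch: build a 2x1x1 supercell graph with the
    # multiplication defined above; edges that crossed the cell boundary and
    # whose image node now lies inside the supercell are rewired accordingly:
    #
    #     supercell_sg = sg * (2, 1, 1)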
def __rmul__(self, other):
return self.__mul__(other)
@classmethod
def _edges_to_string(cls, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-" * max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0, 1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.3e}\n".format(
u, v, str(data.get("to_jimage", (0, 0, 0))), data.get("weight", 0)
)
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Structure / number of nodes in graph
"""
return len(self.structure)
def sort(self, key=None, reverse=False):
"""
Same as Structure.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_structure = self.structure.copy()
# sort Structure
self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse)
# apply Structure ordering to graph
mapping = {idx: self.structure.index(site) for idx, site in enumerate(old_structure)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d["to_jimage"] = tuple(np.multiply(-1, d["to_jimage"]).astype(int))
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return StructureGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two StructureGraphs are equal if they have equal Structures,
and have the same edges between Sites. Edge weights can be
different and StructureGraphs can still be considered equal.
:param other: StructureGraph
:return (bool):
"""
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and (self.structure == other_sorted.structure)
def diff(self, other, strict=True):
"""
Compares two StructureGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one StructureGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two StructureGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the StructureGraph this method is called
from, not the 'other' StructureGraph: there
is no guarantee the node indices will be the
same if the underlying Structures are ordered
differently.
:param other: StructureGraph
:param strict: if False, will compare bonds
from different Structures, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.structure != other.structure and strict:
            raise ValueError("Meaningless to compare StructureGraphs if corresponding Structures are different.")
if strict:
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
else:
edges = {
(str(self.structure[u].specie), str(self.structure[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
(str(other.structure[u].specie), str(other.structure[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
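    # Hedged usage sketch (added; not in the original source): comparing the bonding
    # found by two different near-neighbour strategies on the same (assumed,
    # previously loaded) Structure `struct`.
    #
    #     from pymatgen.analysis.local_env import MinimumDistanceNN, CrystalNN
    #     sg_a = StructureGraph.with_local_env_strategy(struct, MinimumDistanceNN())
    #     sg_b = StructureGraph.with_local_env_strategy(struct, CrystalNN())
    #     report = sg_a.diff(sg_b)
    #     print(report["dist"])  # Jaccard distance; 0.0 means identical edge sets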
def get_subgraphs_as_molecules(self, use_weights=False):
"""
Retrieve subgraphs as molecules, useful for extracting
molecules from periodic crystals.
Will only return unique molecules, not any duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
:param use_weights (bool): If True, only treat subgraphs
as isomorphic if edges have the same weights. Typically,
this means molecules will need to have the same bond
lengths to be defined as duplicates, otherwise bond
lengths can differ. This is a fairly robust approach,
but will treat e.g. enantiomers as being duplicates.
:return: list of unique Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
if getattr(self, "_supercell_sg", None) is None:
self._supercell_sg = supercell_sg = self * (3, 3, 3)
# make undirected to find connected subgraphs
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = [supercell_sg.graph.subgraph(c) for c in nx.connected_components(supercell_sg.graph)]
# discount subgraphs that lie across *supercell* boundaries
        # these will be subgraphs representing crystals
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any([d["to_jimage"] != (0, 0, 0) for u, v, d in subgraph.edges(data=True)])
if not intersects_boundary:
molecule_subgraphs.append(nx.MultiDiGraph(subgraph))
# add specie names to graph to be able to test for isomorphism
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1["specie"] == n2["specie"]
def edge_match(e1, e2):
if use_weights:
return e1["weight"] == e2["weight"]
return True
# prune duplicate subgraphs
unique_subgraphs = []
for subgraph in molecule_subgraphs:
already_present = [
nx.is_isomorphic(subgraph, g, node_match=node_match, edge_match=edge_match) for g in unique_subgraphs
]
if not any(already_present):
unique_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in unique_subgraphs:
coords = [supercell_sg.structure[n].coords for n in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n in subgraph.nodes()]
molecule = Molecule(species, coords)
# shift so origin is at center of mass
molecule = molecule.get_centered_molecule()
molecules.append(molecule)
return molecules
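    # Hedged usage sketch (added; not in the original source): extracting discrete
    # molecules from a molecular crystal. The CIF path is an illustrative assumption.
    #
    #     from pymatgen.core.structure import Structure
    #     from pymatgen.analysis.local_env import JmolNN
    #     crystal = Structure.from_file("ice_Ih.cif")  # hypothetical molecular crystal
    #     sg = StructureGraph.with_local_env_strategy(crystal, JmolNN())
    #     molecules = sg.get_subgraphs_as_molecules()  # e.g. a single unique H2O Molecule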
class MolGraphSplitError(Exception):
"""
Raised when a molecule graph is failed to split into two disconnected
subgraphs
"""
pass
class MoleculeGraph(MSONable):
"""
This is a class for annotating a Molecule with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, molecule, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
This class that contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given molecule easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
:param molecule: Molecule object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(molecule, MoleculeGraph):
# just make a copy from input
graph_data = molecule.as_dict()["graphs"]
self.molecule = molecule
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if "id" in d:
del d["id"]
if "key" in d:
del d["key"]
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if "to_jimage" in d:
d["to_jimage"] = tuple(d["to_jimage"])
if "from_jimage" in d:
d["from_jimage"] = tuple(d["from_jimage"])
self.set_node_attributes()
@classmethod
def with_empty_graph(cls, molecule, name="bonds", edge_weight_name=None, edge_weight_units=None):
"""
Constructor for MoleculeGraph, returns a MoleculeGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Molecule).
:param molecule (Molecule):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (MoleculeGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError(
"Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless."
)
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(
edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name,
)
graph.add_nodes_from(range(len(molecule)))
graph_data = json_graph.adjacency_data(graph)
return cls(molecule, graph_data=graph_data)
@staticmethod
def with_edges(molecule, edges):
"""
Constructor for MoleculeGraph, using pre-existing or pre-defined edges
with optional edge parameters.
:param molecule: Molecule object
:param edges: dict representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. Props should be None if no
additional properties are to be specified.
:return: mg, a MoleculeGraph
"""
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="")
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
except TypeError:
                raise ValueError("Edges must be given as (from_index, to_index) tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = mg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError(
"Edges cannot be added if nodes are not" " present in the graph. Please check your" " indices."
)
mg.add_edge(from_index, to_index, weight=weight, edge_properties=props)
mg.set_node_attributes()
return mg
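    # Hedged usage sketch (added; not in the original source): building a water
    # MoleculeGraph from explicit bonds; coordinates are approximate and illustrative.
    #
    #     water = Molecule(["O", "H", "H"],
    #                      [[0.00, 0.00, 0.00],
    #                       [0.76, 0.59, 0.00],
    #                       [-0.76, 0.59, 0.00]])
    #     mg = MoleculeGraph.with_edges(water, {(0, 1): {"weight": 0.96},
    #                                           (0, 2): {"weight": 0.96}})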
@staticmethod
def with_local_env_strategy(molecule, strategy):
"""
Constructor for MoleculeGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param molecule: Molecule object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:return: mg, a MoleculeGraph
"""
if not strategy.molecules_allowed:
raise ValueError(
"Chosen strategy is not designed for use with molecules! " "Please choose another strategy."
)
extend_structure = strategy.extend_structure_molecules
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="")
# NearNeighbor classes only (generally) work with structures
# molecules have to be boxed first
coords = molecule.cart_coords
if extend_structure:
a = max(coords[:, 0]) - min(coords[:, 0]) + 100
b = max(coords[:, 1]) - min(coords[:, 1]) + 100
c = max(coords[:, 2]) - min(coords[:, 2]) + 100
structure = molecule.get_boxed_structure(a, b, c, no_cross=True, reorder=False)
else:
structure = None
for n in range(len(molecule)):
if structure is None:
neighbors = strategy.get_nn_info(molecule, n)
else:
neighbors = strategy.get_nn_info(structure, n)
for neighbor in neighbors:
# all bonds in molecules should not cross
# (artificial) periodic boundaries
if not np.array_equal(neighbor["image"], [0, 0, 0]):
continue
if n > neighbor["site_index"]:
from_index = neighbor["site_index"]
to_index = n
else:
from_index = n
to_index = neighbor["site_index"]
mg.add_edge(
from_index=from_index,
to_index=to_index,
weight=neighbor["weight"],
warn_duplicates=False,
)
duplicates = []
for edge in mg.graph.edges:
if edge[2] != 0:
duplicates.append(edge)
for duplicate in duplicates:
mg.graph.remove_edge(duplicate[0], duplicate[1], key=duplicate[2])
mg.set_node_attributes()
return mg
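    # Hedged usage sketch (added; not in the original source): letting a local_env
    # strategy perceive bonds automatically; `methane` is an assumed pre-built Molecule
    # with carbon at site 0, and OpenBabelNN requires the openbabel bindings.
    #
    #     from pymatgen.analysis.local_env import OpenBabelNN
    #     mg = MoleculeGraph.with_local_env_strategy(methane, OpenBabelNN())
    #     mg.get_coordination_of_site(0)  # -> 4 for the central carbon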
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph["name"]
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph["edge_weight_name"]
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph["edge_weight_units"]
def add_edge(
self,
from_index,
to_index,
weight=None,
warn_duplicates=True,
edge_properties=None,
):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
        between sites) doesn't have a direction, from_index
        can be swapped with to_index; edges are always stored
        so that from_index < to_index.
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
# sanitize types
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between two sites
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data and warn_duplicates:
warnings.warn(
"Trying to add an edge that already exists from " "site {} to site {}.".format(from_index, to_index)
)
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index, weight=weight, **edge_properties)
else:
self.graph.add_edge(from_index, to_index, **edge_properties)
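    # Hedged usage sketch (added; not in the original source): adding one bond with a
    # bond-length weight to an assumed, already-constructed MoleculeGraph `mg`.
    #
    #     mg.add_edge(0, 1, weight=1.09, edge_properties={"bond_order": 1})
    #     mg.add_edge(1, 0)  # duplicate after index normalization: warned about, not added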
def insert_node(
self,
i,
species,
coords,
validate_proximity=False,
site_properties=None,
edges=None,
):
"""
A wrapper around Molecule.insert(), which also incorporates the new
site into the MoleculeGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param validate_proximity: For Molecule.insert(); if True (default
False), distance will be checked to ensure that site can be safely
added.
:param site_properties: Site properties for Molecule
:param edges: List of dicts representing edges to be added to the
MoleculeGraph. These edges must include the index of the new site i,
and all indices used for these edges should reflect the
MoleculeGraph AFTER the insertion, NOT before. Each dict should at
least have a "to_index" and "from_index" key, and can also have a
"weight" and a "properties" key.
:return:
"""
self.molecule.insert(
i,
species,
coords,
validate_proximity=validate_proximity,
properties=site_properties,
)
mapping = {}
for j in range(len(self.molecule) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(
edge["from_index"],
edge["to_index"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None),
)
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Replicates molecule site properties (specie, coords, etc.) in the
MoleculeGraph.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.molecule[node].specie.symbol
coords[node] = self.molecule[node].coords
properties[node] = self.molecule[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(self, from_index, to_index, new_weight=None, new_edge_properties=None):
"""
Alters either the weight or the edge_properties of
an edge in the MoleculeGraph.
:param from_index: int
:param to_index: int
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edge = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edge:
raise ValueError(
"Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
)
)
# Third index should always be 0 because there should only be one edge between any two nodes
if new_weight is not None:
self.graph[from_index][to_index][0]["weight"] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][0][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, allow_reverse=False):
"""
Remove an edge from the MoleculeGraph
:param from_index: int
:param to_index: int
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edge = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if existing_edge:
self.graph.remove_edge(from_index, to_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
self.graph.remove_edge(to_index, from_index)
else:
raise ValueError(
"Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
)
)
def remove_nodes(self, indices):
"""
A wrapper for Molecule.remove_sites().
:param indices: list of indices in the current Molecule (and graph) to
be removed.
:return:
"""
self.molecule.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def split_molecule_subgraphs(self, bonds, allow_reverse=False, alterations=None):
"""
Split MoleculeGraph into two or more MoleculeGraphs by
breaking a set of bonds. This function uses
MoleculeGraph.break_edge repeatedly to create
disjoint graphs (two or more separate molecules).
        This function not only alters the graph
        information, but also changes the underlying
        Molecules.
If the bonds parameter does not include sufficient
bonds to separate two molecule fragments, then this
function will fail.
Currently, this function naively assigns the charge
of the total molecule to a single submolecule. A
later effort will be to actually accurately assign
charge.
NOTE: This function does not modify the original
MoleculeGraph. It creates a copy, modifies that, and
returns two or more new MoleculeGraph objects.
:param bonds: list of tuples (from_index, to_index)
representing bonds to be broken to split the MoleculeGraph.
:param alterations: a dict {(from_index, to_index): alt},
where alt is a dictionary including weight and/or edge
properties to be changed following the split.
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return: list of MoleculeGraphs
"""
self.set_node_attributes()
original = copy.deepcopy(self)
for bond in bonds:
original.break_edge(bond[0], bond[1], allow_reverse=allow_reverse)
if nx.is_weakly_connected(original.graph):
raise MolGraphSplitError("Cannot split molecule; MoleculeGraph is still connected.")
# alter any bonds before partition, to avoid remapping
if alterations is not None:
for (u, v) in alterations.keys():
if "weight" in alterations[(u, v)]:
weight = alterations[(u, v)]["weight"]
del alterations[(u, v)]["weight"]
edge_properties = alterations[(u, v)] if len(alterations[(u, v)]) != 0 else None
original.alter_edge(u, v, new_weight=weight, new_edge_properties=edge_properties)
else:
original.alter_edge(u, v, new_edge_properties=alterations[(u, v)])
sub_mols = []
# Had to use nx.weakly_connected_components because of deprecation
# of nx.weakly_connected_component_subgraphs
subgraphs = [original.graph.subgraph(c) for c in nx.weakly_connected_components(original.graph)]
for subg in subgraphs:
nodes = sorted(list(subg.nodes))
# Molecule indices are essentially list-based, so node indices
# must be remapped, incrementing from 0
mapping = {n: i for i, n in enumerate(nodes)}
# just give charge to whatever subgraph has node with index 0
# TODO: actually figure out how to distribute charge
if 0 in nodes:
charge = self.molecule.charge
else:
charge = 0
# relabel nodes in graph to match mapping
new_graph = nx.relabel_nodes(subg, mapping)
species = nx.get_node_attributes(new_graph, "specie")
coords = nx.get_node_attributes(new_graph, "coords")
raw_props = nx.get_node_attributes(new_graph, "properties")
properties = {}
for prop_set in raw_props.values():
for prop in prop_set.keys():
if prop in properties:
properties[prop].append(prop_set[prop])
else:
properties[prop] = [prop_set[prop]]
# Site properties must be present for all atoms in the molecule
# in order to be used for Molecule instantiation
for k, v in properties.items():
if len(v) != len(species):
del properties[k]
new_mol = Molecule(species, coords, charge=charge, site_properties=properties)
graph_data = json_graph.adjacency_data(new_graph)
# create new MoleculeGraph
sub_mols.append(MoleculeGraph(new_mol, graph_data=graph_data))
return sub_mols
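    # Hedged usage sketch (added; not in the original source): cleaving an assumed
    # ethanol MoleculeGraph `mg` at one bond (indices are illustrative); a
    # MolGraphSplitError signals that the chosen bonds do not disconnect the graph.
    #
    #     try:
    #         fragments = mg.split_molecule_subgraphs([(1, 2)], allow_reverse=True)
    #     except MolGraphSplitError:
    #         fragments = []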
def build_unique_fragments(self):
"""
Find all possible fragment combinations of the MoleculeGraphs (in other
words, all connected induced subgraphs)
:return:
"""
self.set_node_attributes()
graph = self.graph.to_undirected()
# find all possible fragments, aka connected induced subgraphs
frag_dict = {}
for ii in range(1, len(self.molecule)):
for combination in combinations(graph.nodes, ii):
mycomp = []
for idx in combination:
mycomp.append(str(self.molecule[idx].specie))
mycomp = "".join(sorted(mycomp))
subgraph = nx.subgraph(graph, combination)
if nx.is_connected(subgraph):
mykey = mycomp + str(len(subgraph.edges()))
if mykey not in frag_dict:
frag_dict[mykey] = [copy.deepcopy(subgraph)]
else:
frag_dict[mykey].append(copy.deepcopy(subgraph))
# narrow to all unique fragments using graph isomorphism
unique_frag_dict = {}
for key in frag_dict:
unique_frags = []
for frag in frag_dict[key]:
found = False
for f in unique_frags:
if _isomorphic(frag, f):
found = True
break
if not found:
unique_frags.append(frag)
unique_frag_dict[key] = copy.deepcopy(unique_frags)
# convert back to molecule graphs
unique_mol_graph_dict = {}
for key in unique_frag_dict:
unique_mol_graph_list = []
for fragment in unique_frag_dict[key]:
mapping = {e: i for i, e in enumerate(sorted(fragment.nodes))}
remapped = nx.relabel_nodes(fragment, mapping)
species = nx.get_node_attributes(remapped, "specie")
coords = nx.get_node_attributes(remapped, "coords")
edges = {}
for from_index, to_index, key in remapped.edges:
edge_props = fragment.get_edge_data(from_index, to_index, key=key)
edges[(from_index, to_index)] = edge_props
unique_mol_graph_list.append(
self.with_edges(
Molecule(species=species, coords=coords, charge=self.molecule.charge),
edges,
)
)
frag_key = (
str(unique_mol_graph_list[0].molecule.composition.alphabetical_formula)
+ " E"
+ str(len(unique_mol_graph_list[0].graph.edges()))
)
unique_mol_graph_dict[frag_key] = copy.deepcopy(unique_mol_graph_list)
return unique_mol_graph_dict
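    # Hedged usage sketch (added; not in the original source): enumerating the unique
    # connected fragments of an assumed MoleculeGraph `mg`; keys combine the fragment
    # formula with its edge count.
    #
    #     frags = mg.build_unique_fragments()
    #     for key, graphs in frags.items():
    #         print(key, len(graphs))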
def substitute_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Molecule.substitute to replace an atom in self.molecule
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: using a MoleculeGraph will generally produce a different graph
compared with using a Molecule or str (when not using graph_dict).
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
def map_indices(grp):
grp_map = {}
# Get indices now occupied by functional group
# Subtracting 1 because the dummy atom X should not count
atoms = len(grp) - 1
offset = len(self.molecule) - atoms
for i in range(atoms):
grp_map[i] = i + offset
return grp_map
# Work is simplified if a graph is already in place
if isinstance(func_grp, MoleculeGraph):
self.molecule.substitute(index, func_grp.molecule, bond_order=bond_order)
mapping = map_indices(func_grp.molecule)
for (u, v) in list(func_grp.graph.edges()):
edge_props = func_grp.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props)
else:
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except Exception:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
self.molecule.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(
mapping[u],
mapping[v],
weight=weight,
edge_properties=edge_props,
)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
graph = self.with_local_env_strategy(func_grp, strat)
for (u, v) in list(graph.graph.edges()):
edge_props = graph.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
if 0 not in list(graph.graph.nodes()):
# If graph indices have different indexing
u, v = (u - 1), (v - 1)
self.add_edge(
mapping[u],
mapping[v],
weight=weight,
edge_properties=edge_props,
)
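    # Hedged usage sketch (added; not in the original source): swapping a terminal atom
    # of an assumed MoleculeGraph `mg` for a methyl group from the bundled
    # func_groups.json templates; the site index and choice of JmolNN are illustrative.
    #
    #     from pymatgen.analysis.local_env import JmolNN
    #     mg.substitute_group(4, "methyl", JmolNN)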
def replace_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Molecule.substitute and MoleculeGraph.substitute_group
to replace a functional group in self.molecule with a functional group.
This method also amends self.graph to incorporate the new functional
group.
TODO: Figure out how to replace into a ring structure.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
self.set_node_attributes()
neighbors = self.get_connected_sites(index)
# If the atom at index is terminal
if len(neighbors) == 1:
self.substitute_group(
index,
func_grp,
strategy,
bond_order=bond_order,
graph_dict=graph_dict,
strategy_params=strategy_params,
)
else:
rings = self.find_rings(including=[index])
if len(rings) != 0:
                raise RuntimeError(
                    "Currently functional group replacement cannot occur at an atom within a ring structure."
                )
to_remove = set()
sizes = dict()
disconnected = self.graph.to_undirected()
disconnected.remove_node(index)
for neighbor in neighbors:
sizes[neighbor[2]] = len(nx.descendants(disconnected, neighbor[2]))
keep = max(sizes, key=lambda x: sizes[x])
for i in sizes.keys():
if i != keep:
to_remove.add(i)
self.remove_nodes(list(to_remove))
self.substitute_group(
index,
func_grp,
strategy,
bond_order=bond_order,
graph_dict=graph_dict,
strategy_params=strategy_params,
)
def find_rings(self, including=None):
"""
Find ring structures in the MoleculeGraph.
:param including: list of site indices. If
including is not None, then find_rings will
only return those rings including the specified
sites. By default, this parameter is None, and
all rings will be returned.
:return: dict {index:cycle}. Each
entry will be a ring (cycle, in graph theory terms) including the index
found in the Molecule. If there is no cycle including an index, the
value will be an empty list.
"""
# Copies self.graph such that all edges (u, v) matched by edges (v, u)
undirected = self.graph.to_undirected()
directed = undirected.to_directed()
cycles_nodes = []
cycles_edges = []
# Remove all two-edge cycles
all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]
# Using to_directed() will mean that each cycle always appears twice
# So, we must also remove duplicates
unique_sorted = []
unique_cycles = []
for cycle in all_cycles:
if sorted(cycle) not in unique_sorted:
unique_sorted.append(sorted(cycle))
unique_cycles.append(cycle)
if including is None:
cycles_nodes = unique_cycles
else:
for i in including:
for cycle in unique_cycles:
if i in cycle and cycle not in cycles_nodes:
cycles_nodes.append(cycle)
for cycle in cycles_nodes:
edges = []
for i, e in enumerate(cycle):
edges.append((cycle[i - 1], e))
cycles_edges.append(edges)
return cycles_edges
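    # Hedged usage sketch (added; not in the original source): locating rings that pass
    # through atom 0 of an assumed aromatic MoleculeGraph `mg`.
    #
    #     rings = mg.find_rings(including=[0])
    #     # each entry is a list of (from_index, to_index) edges closing one cycle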
def get_connected_sites(self, n):
"""
        Returns a named tuple of neighbors of site n:
        site, jimage (always (0, 0, 0) for molecules), index, weight, dist.
        Index is the index of the corresponding site
        in the original Molecule; weight can be
        None if not defined.
        :param n: index of Site in Molecule
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
out_edges = list(self.graph.out_edges(n, data=True))
in_edges = list(self.graph.in_edges(n, data=True))
for u, v, d in out_edges + in_edges:
weight = d.get("weight", None)
if v == n:
site = self.molecule[u]
dist = self.molecule[v].distance(self.molecule[u])
connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=u, weight=weight, dist=dist)
else:
site = self.molecule[v]
dist = self.molecule[u].distance(self.molecule[v])
connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=v, weight=weight, dist=dist)
connected_sites.add(connected_site)
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(
self,
filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp",
):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
            raise RuntimeError("MoleculeGraph graph drawing requires GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
        # multi-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.molecule[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.molecule[n].specie.symbol), [0, 0, 0])
# get contrasting font color
# magic numbers account for perceived luminescence
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff"
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(
n,
fillcolor=color,
fontcolor=fontcolor,
label=label,
fontname="Helvetica-bold",
style="filled",
shape="circle",
)
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
            if "to_jimage" in d:
to_image = d["to_jimage"]
else:
to_image = (0, 0, 0)
# set edge style
d["style"] = "solid"
if to_image != (0, 0, 0):
d["style"] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d["arrowhead"] = "none"
# only add labels for images that are not the origin
if image_labels:
d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d["arrowhead"] = "normal" if d["headlabel"] else "none"
# optionally color edges using node colors
            color_u = g.nodes[u]["fillcolor"]
            color_v = g.nodes[v]["fillcolor"]
d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get("edge_weight_units", "")
if d.get("weight"):
d["label"] = "{:.2f} {}".format(d["weight"], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
            g = g.subgraph([n for n, degree in g.degree() if degree != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d["to_jimage"]) in diff["self"]:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d["to_jimage"]) in diff["other"]:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({"color_uv": "#00ff00"})
for u, v, k in red_edges:
g.edges[u, v, k].update({"color_uv": "#ff0000"})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename + ".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename + ".dot"]
rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename + ".dot")
def as_dict(self):
"""
As in :Class: `pymatgen.core.Molecule` except
with using `to_dict_of_dicts` from NetworkX
to store graph information.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"graphs": json_graph.adjacency_data(self.graph),
}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Molecule` except
restoring graphs using `from_dict_of_dicts`
from NetworkX to restore graph information.
"""
m = Molecule.from_dict(d["molecule"])
return cls(m, d["graphs"])
@classmethod
def _edges_to_string(cls, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-" * max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0, 1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.3e}\n".format(
u, v, str(data.get("to_jimage", (0, 0, 0))), data.get("weight", 0)
)
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Molecule / number of nodes in graph
"""
return len(self.molecule)
def sort(self, key=None, reverse=False):
"""
Same as Molecule.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_molecule = self.molecule.copy()
# sort Molecule
self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse)
# apply Molecule ordering to graph
mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d["to_jimage"] = (0, 0, 0)
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return MoleculeGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two MoleculeGraphs are equal if they have equal Molecules,
and have the same edges between Sites. Edge weights can be
different and MoleculeGraphs can still be considered equal.
:param other: MoleculeGraph
:return (bool):
"""
# sort for consistent node indices
        # Molecule sites don't define a unique hash,
        # so use their Cartesian coords as a convenient key
try:
mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
except ValueError:
return False
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and (self.molecule == other_sorted.molecule)
def isomorphic_to(self, other):
"""
Checks if the graphs of two MoleculeGraphs are isomorphic to one
another. In order to prevent problems with misdirected edges, both
graphs are converted into undirected nx.Graph objects.
:param other: MoleculeGraph object to be compared.
:return: bool
"""
if len(self.molecule) != len(other.molecule):
return False
if self.molecule.composition.alphabetical_formula != other.molecule.composition.alphabetical_formula:
return False
if len(self.graph.edges()) != len(other.graph.edges()):
return False
return _isomorphic(self.graph, other.graph)
def diff(self, other, strict=True):
"""
Compares two MoleculeGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one MoleculeGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two MoleculeGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the MoleculeGraph this method is called
from, not the 'other' MoleculeGraph: there
is no guarantee the node indices will be the
same if the underlying Molecules are ordered
differently.
:param other: MoleculeGraph
:param strict: if False, will compare bonds
from different Molecules, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.molecule != other.molecule and strict:
            raise ValueError("Meaningless to compare MoleculeGraphs if corresponding Molecules are different.")
if strict:
# sort for consistent node indices
            # Molecule sites don't define a unique hash, so use their
            # Cartesian coords as a convenient key
            mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
            other_sorted = other.__copy__()
            other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {
(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in other_sorted.graph.edges(keys=False, data=True)
}
else:
edges = {
(str(self.molecule[u].specie), str(self.molecule[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
                (str(other.molecule[u].specie), str(other.molecule[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
| mit |
openfisca/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/almost_ideal_demand_system/aids_dataframe_builder_coicop.py | 4 | 8551 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 08 16:45:12 2015
@author: thomas.douenne
"""
from __future__ import division
import pandas as pd
import numpy as np
from pandas import concat
from openfisca_france_indirect_taxation.examples.utils_example import get_input_data_frame
from openfisca_france_indirect_taxation.almost_ideal_demand_system.aids_price_index_builder import \
df_indice_prix_produit
# Now that we have our price indexes, we construct a dataframe with the rest of the information
data_frame_for_reg = None
for year in [2000, 2005, 2011]:
aggregates_data_frame = get_input_data_frame(2011)
aggregates_data_frame['depenses_tot'] = 0
for i in range(1, 13):
aggregates_data_frame['depenses_tot'] += aggregates_data_frame['coicop12_{}'.format(i)]
produits = [column for column in aggregates_data_frame.columns if column.isdigit()]
data = aggregates_data_frame[produits + ['vag']].copy()
data.index.name = 'ident_men'
data.reset_index(inplace = True)
df = pd.melt(data, id_vars = ['vag', 'ident_men'], value_vars=produits,
value_name = 'depense_bien', var_name = 'bien')
df_indice_prix_produit = df_indice_prix_produit[['indice_prix_produit'] + ['prix'] + ['temps'] + ['mois']]
df['vag'] = df['vag'].astype(str)
df['indice_prix_produit'] = df['vag'] + '_' + df['bien']
df['indice_prix_produit'] = df['indice_prix_produit'].str.replace('_0', '')
df['indice_prix_produit'] = df['indice_prix_produit'].str.replace('_', '')
df['coicop_12_numero'] = df['bien'].str[:2]
df = df[['ident_men'] + ['coicop_12_numero'] + ['indice_prix_produit'] + ['depense_bien'] + ['vag']]
df = pd.merge(df, df_indice_prix_produit, on = 'indice_prix_produit')
df_temps = df[['vag'] + ['temps'] + ['mois']]
df_temps['mois'] = df_temps['mois'].astype(float)
df_temps['mois2'] = df_temps['mois'] ** 2
df_temps = df_temps.drop_duplicates(cols='vag', take_last=True)
df_temps = df_temps.astype(float)
# Construct the price index by coicop:
df['coicop_12_numero'] = df['coicop_12_numero'].astype(int) # Goal : transform 1.0 into 1 to merge with same id.
df = df.astype(str)
df['id'] = df['coicop_12_numero'] + '_' + df['ident_men']
df_depense_coicop = None
for i in range(1, 13):
if df_depense_coicop is not None:
df_depense_coicop = concat([df_depense_coicop, aggregates_data_frame['coicop12_{}'.format(i)]], axis = 1)
else:
df_depense_coicop = aggregates_data_frame['coicop12_{}'.format(i)]
list_coicop12 = [column for column in df_depense_coicop.columns]
df_depense_coicop.index.name = 'ident_men'
df_depense_coicop.reset_index(inplace = True)
df_depense_coicop = pd.melt(df_depense_coicop, id_vars = ['ident_men'], value_vars = list_coicop12)
df_depense_coicop.rename(columns = {'value': 'depense_par_coicop'}, inplace = True)
df_depense_coicop.rename(columns = {'variable': 'numero_coicop'}, inplace = True)
df_depense_coicop['numero_coicop'] = df_depense_coicop['numero_coicop'].str.split('coicop12_').str[1]
df_depense_coicop = df_depense_coicop.astype(str)
df_depense_coicop['id'] = df_depense_coicop['numero_coicop'] + '_' + df_depense_coicop['ident_men']
df_to_merge = df_depense_coicop[['id'] + ['depense_par_coicop']]
df = pd.merge(df, df_to_merge, on = 'id')
df[['prix'] + ['depense_bien'] + ['depense_par_coicop']] = (
df[['prix'] + ['depense_bien'] + ['depense_par_coicop']].astype(float)
)
df['part_bien_coicop'] = df['depense_bien'] / df['depense_par_coicop']
df.fillna(0, inplace=True)
df['indice_prix_pondere'] = df['part_bien_coicop'] * df['prix']
df.sort(['id'])
grouped = df['indice_prix_pondere'].groupby(df['id'])
grouped = grouped.aggregate(np.sum)
grouped.index.name = 'id'
grouped = grouped.reset_index()
# Import information about households, including niveau_vie_decile
# (To do: Obviously there are mistakes in its computation, check why).
df_info_menage = aggregates_data_frame[['ocde10'] + ['depenses_tot'] + ['vag'] + ['typmen'] + ['revtot'] +
['poste_coicop_2201'] + ['poste_coicop_2202'] + ['poste_coicop_2203']]
df_info_menage['fumeur'] = 0
df_info_menage[['poste_coicop_2201'] + ['poste_coicop_2202'] + ['poste_coicop_2203']] = \
df_info_menage[['poste_coicop_2201'] + ['poste_coicop_2202'] + ['poste_coicop_2203']].astype(float)
df_info_menage['consommation_tabac'] = (
df_info_menage['poste_coicop_2201'] + df_info_menage['poste_coicop_2202'] + df_info_menage['poste_coicop_2203']
)
df_info_menage['fumeur'] = 1 * (df_info_menage['consommation_tabac'] > 0)
df_info_menage.drop(['consommation_tabac', 'poste_coicop_2201', 'poste_coicop_2202', 'poste_coicop_2203'],
inplace = True, axis = 1)
df_info_menage.index.name = 'ident_men'
df_info_menage.reset_index(inplace = True)
df_info_menage['ident_men'] = df_info_menage['ident_men'].astype(str)
data_frame = pd.merge(df_depense_coicop, df_info_menage, on = 'ident_men')
data_frame = pd.merge(data_frame, grouped, on = 'id')
data_frame[['depenses_tot'] + ['depense_par_coicop']] = (
data_frame[['depenses_tot'] + ['depense_par_coicop']].astype(float)
)
data_frame['wi'] = data_frame['depense_par_coicop'] / data_frame['depenses_tot']
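    # Note (added, hedged): these budget shares w_i are the dependent variables of the
    # Almost Ideal Demand System regression prepared for Stata below; in its standard
    # form the model is w_i = alpha_i + sum_j gamma_ij * log(p_j) + beta_i * log(x / P),
    # with the price indexes p_j and per-consumption-unit expenditure x built in this script.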
data_frame = data_frame.astype(str)
# By construction, those who don't consume in coicop_i have a price index of 0 for this coicop.
# We replace it with the price index of the whole coicop at the same vag.
data_frame['indice_prix_produit'] = data_frame['vag'] + data_frame['numero_coicop'] + '000'
df_indice_prix_produit['prix'] = df_indice_prix_produit['prix'].astype(float)
df_indice_prix_produit['prix_coicop'] = df_indice_prix_produit['prix']
df_indice_prix_produit_to_merge = df_indice_prix_produit[['indice_prix_produit'] + ['prix_coicop']]
data_frame = pd.merge(data_frame, df_indice_prix_produit_to_merge, on = 'indice_prix_produit')
data_frame['indice_prix_pondere'] = data_frame['indice_prix_pondere'].astype(float)
data_frame.loc[data_frame['indice_prix_pondere'] == 0, 'indice_prix_pondere'] = \
data_frame.loc[data_frame['indice_prix_pondere'] == 0, 'prix_coicop']
data_frame = data_frame.drop(['prix_coicop', 'indice_prix_produit'], axis = 1)
# Reshape the dataframe to have the price index of each coicop as a variable
data_frame_prix = data_frame[['numero_coicop'] + ['ident_men'] + ['indice_prix_pondere']]
data_frame_prix.index.name = 'ident_men'
data_frame_prix = pd.pivot_table(data_frame_prix, index='ident_men', columns='numero_coicop',
values='indice_prix_pondere')
data_frame_prix.reset_index(inplace = True)
data_frame = pd.merge(data_frame, data_frame_prix, on = 'ident_men')
for i in range(1, 13):
data_frame.rename(columns = {'{}'.format(i): 'p{}'.format(i)}, inplace = True)
del data_frame['id']
data_frame = data_frame.astype(float)
data_frame['depenses_par_uc'] = data_frame['depenses_tot'] / data_frame['ocde10']
data_frame = pd.merge(data_frame, df_temps, on = 'vag')
data_frame['numero_coicop'] = data_frame['numero_coicop'].astype(int)
data_frame['numero_coicop'] = data_frame['numero_coicop'].astype(str)
data_frame2 = pd.pivot_table(data_frame, index = 'ident_men', columns = 'numero_coicop',
values = 'wi')
for i in range(1, 13):
data_frame2.rename(columns = {'{}'.format(i): 'w{}'.format(i)}, inplace = True)
data_frame2.index.name = 'ident_men'
data_frame2 = data_frame2.reset_index()
data_frame = pd.merge(data_frame, data_frame2, on = 'ident_men')
data_frame = data_frame.drop_duplicates(cols = 'ident_men', take_last = True)
data_frame.drop(
['depense_par_coicop', 'depenses_tot', 'indice_prix_pondere', 'wi', 'numero_coicop'],
inplace = True, axis = 1
)
data_frame.to_csv('data_frame_r_{}_by_coicop.csv'.format(year), sep = ',')
if data_frame_for_reg is not None:
data_frame_for_reg = pd.concat([data_frame_for_reg, data_frame])
else:
data_frame_for_reg = data_frame
data_frame_for_reg.to_csv('data_frame_for_stata_by_coicop.csv', sep = ',')
data_frame_for_reg['somme_wi'] = 0
for i in range(1, 13):
data_frame_for_reg['somme_wi'] += data_frame_for_reg['w{}'.format(i)]
assert np.allclose(data_frame_for_reg['somme_wi'], 1), 'The expenditure shares do not sum to 1'
| agpl-3.0 |
ahollocou/walkscan | python_code/walkscan.py | 1 | 3339 | import collections
import numpy as np
import networkx as nx
from sklearn.cluster import DBSCAN
class WalkSCAN:
def __init__(self, nb_steps=2, eps=0.1, min_samples=3):
self.nb_steps = nb_steps
self.eps = eps
self.min_samples = min_samples
self.dbscan_ = DBSCAN(eps=self.eps, min_samples=self.min_samples)
def load(self, graph, init_vector):
self.graph = graph.copy()
self.init_vector = init_vector.copy()
def embed_nodes(self):
p = {0: self.init_vector.copy()}
for t in range(self.nb_steps):
p[t + 1] = collections.defaultdict(int)
for v in p[t]:
for (_, w, e_data) in self.graph.edges(v, data=True):
if 'weight' in e_data:
self.weighted_ = True
p[t + 1][w] += float(e_data['weight']) / float(self.graph.degree(v, weight='weight')) * p[t][v]
else:
self.weighted_ = False
p[t + 1][w] += 1.0 / float(self.graph.degree(v)) * p[t][v]
self.embedded_value_ = dict()
self.embedded_nodes_ = list()
for v in p[self.nb_steps]:
self.embedded_nodes_.append(v)
self.embedded_value_[v] = np.array([p[t + 1][v] for t in range(self.nb_steps)])
self.nb_embedded_nodes_ = len(self.embedded_nodes_)
def find_cores(self):
if self.nb_embedded_nodes_ > 0:
P = np.zeros((self.nb_embedded_nodes_, self.nb_steps))
for (i, node) in enumerate(self.embedded_nodes_):
P[i, :] = self.embedded_value_[node]
self.dbscan_.fit(P)
self.cores_ = collections.defaultdict(set)
self.outliers_ = set()
for (i, node) in enumerate(self.embedded_nodes_):
label = self.dbscan_.labels_[i]
if label >= 0:
self.cores_[label].add(node)
else:
self.outliers_.add(node)
else:
self.cores_ = {}
self.outliers_ = set()
def compute_core_average_value(self):
self.core_average_value_ = dict()
for (core_id, core) in self.cores_.iteritems():
self.core_average_value_[core_id] = np.zeros(self.nb_steps)
for node in core:
for t in range(self.nb_steps):
self.core_average_value_[core_id][t] += self.embedded_value_[node][t] / float(len(core))
def sort_cores(self):
self.sorted_core_ids_ = self.cores_.keys()
self.sorted_core_ids_.sort(key=lambda i: list(self.core_average_value_[i]),
reverse=True)
self.sorted_cores_ = [self.cores_[i] for i in self.sorted_core_ids_]
def aggregate_outliers(self):
self.communities_ = list()
for core in self.sorted_cores_:
community = core.copy()
for node in core:
community |= set(nx.neighbors(self.graph, node)) & self.outliers_
self.communities_.append(community)
def detect_communities(self, graph, init_vector):
self.load(graph, init_vector)
self.embed_nodes()
self.find_cores()
self.compute_core_average_value()
self.sort_cores()
self.aggregate_outliers()
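# Hedged usage sketch (added; not part of the original module): seeding WalkSCAN from a
# single node of networkx's built-in karate-club graph; all parameter values and the
# seed node are illustrative assumptions.
#
#     g = nx.karate_club_graph()
#     seed = {0: 1.0}  # all probability mass starts on node 0
#     ws = WalkSCAN(nb_steps=2, eps=0.05, min_samples=3)
#     ws.detect_communities(g, seed)
#     print(ws.communities_)  # list of node sets, best-scoring core first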
| gpl-3.0 |
borisz264/mod_seq | unused_scripts/map_onto_rRNA_structure.py | 1 | 4996 | __author__ = 'boris'
"""
inputs:
outprefix
bundle 1
bundle 2
bundle 3
bundle 4
bundle 5 - the 5 pdb files from the 4v88 bundle
reactivity_values - a pickled dict of [strand][chromosome][position] = reactivity_value or change, such as from compare_samples.py
outputs:
the 5 PDB files in the bundle, with b factors replaced with reactivity scores for the corresponding rRNA residues.
"""
import sys
import mod_utils
import os
import gzip
from scipy.stats.mstats import winsorize
import matplotlib.pyplot as plt
from collections import defaultdict
import numpy
import math
plt.rcParams['pdf.fonttype'] = 42 #leaves most text as actual text in PDFs, not outlines
def write_wig(mutation_dict, sample_name, output_prefix):
"""
:param mutation_dict: a pickled dict of [strand][chromosome][position] = mutations/coverage
:param output_prefix:
:return:
"""
score_table = open(output_prefix+'_scores.txt', 'w')
plusWig = gzip.open(output_prefix+'_plus.wig.gz', 'w')
#minusWig = gzip.open(output_prefix+'_minus.wig.gz', 'w')
plusWig.write('track type=wiggle_0 name=%s\n' % (sample_name+'_plus'))
#minusWig.write('track type=wiggle_0 name=%s\n' % (sample_name+'_minus'))
allChrs=set(mutation_dict['+'].keys()).union(set(mutation_dict['-'].keys()))
for chr in allChrs:
#minPos = min([min(weightedReads['+'][chr].keys()), min(weightedReads['-'][chr].keys())])
#maxPos = max([max(weightedReads['+'][chr].keys()), max(weightedReads['-'][chr].keys())])
plusWig.write('variableStep chrom=%s\n' % (chr))
#minusWig.write('variableStep chrom=%s\n' % (chr))
if chr in mutation_dict['+']:
for i in sorted(mutation_dict['+'][chr].keys()):
plusWig.write('%d\t%f\n' % (i, mutation_dict['+'][chr][i]))
score_table.write('%s_%d\t%f\n' % (chr, i, mutation_dict['+'][chr][i]))
#if chr in mutation_dict['-']:
#for i in sorted(mutation_dict['-'][chr].keys()):
#minusWig.write('%d\t%f\n' % (i, mutation_dict['-'][chr][i]/mutation_dict))
plusWig.close()
score_table.close()
#minusWig.close()
def normalize_dict_to_max(mutation_dict, winsorize_data = False, winsorization_limits = (0, 0.95)):
all_values = []
normed_dict = {}
for strand in mutation_dict:
normed_dict[strand] = {}
for chromosome in mutation_dict[strand]:
normed_dict[strand][chromosome] = {}
#print mutation_dict[strand][chromosome].values()
all_values += mutation_dict[strand][chromosome].values()
#print all_values
if winsorize_data:
winsorize(all_values, limits = (winsorization_limits[0], 1-winsorization_limits[1]), inplace = True)
max_value = float(max(all_values))
for strand in mutation_dict:
for chromosome in mutation_dict[strand]:
for position in mutation_dict[strand][chromosome]:
val = mutation_dict[strand][chromosome][position]
if val < min(all_values):
val = min(all_values)
if val > max(all_values):
val = max(all_values)
normed_dict[strand][chromosome][position] = val/max_value
return normed_dict
def split_by_n(line, n=6):
    """
    splits the line into consecutive chunks of length n (default 6), each of which has whitespace stripped
    :param line: string to split
    :param n: chunk length
    :return: list of stripped chunks
    """
    return [line[i:i+n].strip() for i in range(0, len(line), n)]
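# e.g. split_by_n('ATOM    123', n=4) -> ['ATOM', '', '123'] (illustration only;
# the helper is not called elsewhere in this script).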
rRNA_assignments = {3:{'d':"S.c.18S_rRNA"}, 1:{'A':"S.c.18S_rRNA"},2:{'A':"S.c.25S__rRNA", 'B':"S.c.5S___rRNA", 'C':"S.c.5.8S_rRNA"} , (4,'K'):"S.c.25S__rRNA", (4,'L'):"S.c.5S___rRNA", (4,'M'):"S.c.5.8S_rRNA", (2,'C'):"S.c.5.8S_rRNA"}
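# rRNA_assignments maps a bundle index (and, via the nested dicts, a PDB chain ID)
# to the rRNA "chromosome" name used in the reactivity dict. Note that main() only
# looks up the integer-keyed entries; tuple-keyed ones such as (4, 'K') are never
# consulted by the code below.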
def main():
outprefix, bundle1, bundle2, bundle3, bundle4, bundle5, datafile_name = sys.argv[1:8]
bundles = [bundle1, bundle2, bundle3, bundle4, bundle5]
reactivities = mod_utils.unPickle(datafile_name)
for i in range(1,6):
infile = open(bundles[i-1])
outfile = open(outprefix+'_bundle'+str(i)+'.pdb' ,'w')
for line in infile:
if line.startswith('ATOM'):
chain = line[21]
resi = int(line[22:28].strip())
if i in rRNA_assignments and chain in rRNA_assignments[i] and resi in reactivities['+'][rRNA_assignments[i][chain]]:
new_line = '%s%6.3f%s' % (line[:60], reactivities['+'][rRNA_assignments[i][chain]][resi], line[66:])
assert len(line) == len(new_line)
else:
new_line = '%s%6.4f%s' % (line[:60], 0.0, line[66:])
assert len(line) == len(new_line)
elif line.startswith("ANISOU"):
new_line = '' #remove the anisotropic b factors, I don't need them
else:
new_line = line
outfile.write(new_line)
infile.close()
outfile.close()
main() | mit |
shakamunyi/tensorflow | tensorflow/examples/learn/multiple_gpu.py | 24 | 4167 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: Dict of input `Tensor`.
labels: Label `Tensor`.
mode: One of `ModeKeys`.
Returns:
`EstimatorSpec`.
"""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
with tf.device('/device:GPU:1'):
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
with tf.device('/device:GPU:2'):
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
  # Convert the labels to a one-hot tensor of shape (length of features, 3)
  # and with an on-value of 1 for each one-hot vector of length 3.
onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(
loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
Petr-Kovalev/nupic-win32 | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py | 69 | 20839 | """
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
global Fl_running
if Fl_running.acquire(0):
while True:
Fltk.Fl.check()
time.sleep(0.005)
else:
print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord= {
cursors.HAND: Fltk.FL_CURSOR_HAND,
cursors.POINTER: Fltk.FL_CURSOR_ARROW,
cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
cursors.MOVE: Fltk.FL_CURSOR_MOVE
}
special_key={
    Fltk.FL_Shift_R:'shift',
    Fltk.FL_Shift_L:'shift',
    Fltk.FL_Control_R:'control',
    Fltk.FL_Control_L:'control',
    65515:'win',
    65516:'win',
    }
def error_msg_fltk(msg, parent=None):
Fltk.fl_message(msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def ishow():
"""
Show all the figures and enter the fltk mainloop in another thread
This allows to keep hand in interractive python session
Warning: does not work under windows
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
if show._needmain:
thread.start_new_thread(Fltk_run_interactive,())
show._needmain = False
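# Hedged usage sketch for ishow() (assumes an interactive, non-Windows session):
#   from pylab import plot
#   plot([1, 2, 3])
#   ishow()   # figures are shown while the interactive prompt stays usable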
def show():
"""
Show all the figures and enter the fltk mainloop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
    #mainloop; if an fltk program exists there is no need to call that
    #threaded (and interactive) version
if show._needmain:
Fltk.Fl.run()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Fltk.Fl_Double_Window(10,10,30,30)
canvas = FigureCanvasFltkAgg(figure)
window.end()
window.show()
window.make_current()
figManager = FigureManagerFltkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FltkCanvas(Fltk.Fl_Widget):
def __init__(self,x,y,w,h,l,source):
Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
self._source=source
self._oldsize=(None,None)
self._draw_overlay = False
self._button = None
self._key = None
def draw(self):
newsize=(self.w(),self.h())
if(self._oldsize !=newsize):
self._oldsize =newsize
self._source.resize(newsize)
self._source.draw()
t1,t2,w,h = self._source.figure.bbox.bounds
Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0)
self.redraw()
def blit(self,bbox=None):
if bbox is None:
t1,t2,w,h = self._source.figure.bbox.bounds
else:
t1o,t2o,wo,ho = self._source.figure.bbox.bounds
t1,t2,w,h = bbox.bounds
x,y=int(t1),int(t2)
Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4)
#self.redraw()
def handle(self, event):
x=Fltk.Fl.event_x()
y=Fltk.Fl.event_y()
yf=self._source.figure.bbox.height() - y
if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
return 1
elif event == Fltk.FL_KEYDOWN:
ikey= Fltk.Fl.event_key()
if(ikey<=255):
self._key=chr(ikey)
else:
try:
self._key=special_key[ikey]
except:
self._key=None
FigureCanvasBase.key_press_event(self._source, self._key)
return 1
elif event == Fltk.FL_KEYUP:
FigureCanvasBase.key_release_event(self._source, self._key)
self._key=None
elif event == Fltk.FL_PUSH:
self.window().make_current()
if Fltk.Fl.event_button1():
self._button = 1
elif Fltk.Fl.event_button2():
self._button = 2
elif Fltk.Fl.event_button3():
self._button = 3
else:
self._button = None
if self._draw_overlay:
self._oldx=x
self._oldy=y
if Fltk.Fl.event_clicks():
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
else:
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
elif event == Fltk.FL_ENTER:
self.take_focus()
return 1
elif event == Fltk.FL_LEAVE:
return 1
elif event == Fltk.FL_MOVE:
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_DRAG:
self.window().make_current()
if self._draw_overlay:
self._dx=Fltk.Fl.event_x()-self._oldx
self._dy=Fltk.Fl.event_y()-self._oldy
Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy)
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_RELEASE:
self.window().make_current()
if self._draw_overlay:
Fltk.fl_overlay_clear()
FigureCanvasBase.button_release_event(self._source, x, yf, self._button)
self._button = None
return 1
return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self,figure)
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
#self.draw()
def resize(self,size):
w, h = size
# compute desired figure size in inches
dpival = self.figure.dpi.get()
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch,hinch)
def draw(self):
FigureCanvasAgg.draw(self)
self.canvas.redraw()
def blit(self,bbox):
self.canvas.blit(bbox)
show = draw
def widget(self):
return self.canvas
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
figman.window.hide()
Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The fltk.Toolbar
window : The fltk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
#Fltk container window
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window = window
self.window.size(w,h+30)
self.window_title="Figure %d" % num
self.window.label(self.window_title)
self.window.size_range(350,200)
self.window.callback(destroy_figure,self)
self.canvas = canvas
self._num = num
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
else:
self.toolbar = None
self.window.add_resizable(canvas.widget())
if self.toolbar:
self.window.add(self.toolbar.widget())
self.toolbar.update()
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
_focus = windowing.FocusManager()
self.canvas.draw()
self.window.redraw()
def set_window_title(self, title):
self.window_title=title
self.window.label(title)
class AxisMenu:
def __init__(self, toolbar):
self.toolbar=toolbar
self._naxes = toolbar.naxes
self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
self._mbutton.add("Select All",0,select_all,self,0)
self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
self._axis_txt=[]
self._axis_var=[]
for i in range(self._naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes, naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
self._mbutton.remove(i+2)
if(naxes):
self._axis_var=self._axis_var[:naxes-1]
self._axis_txt=self._axis_txt[:naxes-1]
else:
self._axis_var=[]
self._axis_txt=[]
self._naxes = naxes
set_active(0,self)
def widget(self):
return self._mbutton
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
return a
def set_active(ptr,amenu):
amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
for a in amenu._axis_var:
if not a.value(): a.set()
set_active(ptr,amenu)
def select_all(ptr,amenu):
for a in amenu._axis_var:
a.set()
set_active(ptr,amenu)
class FLTKButton:
def __init__(self, text, file, command,argument,type="classic"):
file = os.path.join(rcParams['datapath'], 'images', file)
self.im = Fltk.Fl_PNM_Image(file)
size=26
if type=="repeat":
self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="classic":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="light":
self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="pushed":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_UP_BOX)
self.b.down_box(Fltk.FL_DOWN_BOX)
self.b.type(Fltk.FL_TOGGLE_BUTTON)
self.tooltiptext=text+" "
self.b.tooltip(self.tooltiptext)
self.b.callback(command,argument)
self.b.image(self.im)
self.b.deimage(self.im)
self.type=type
def widget(self):
return self.b
class NavigationToolbar:
"""
    Public attributes
canvas - the FigureCanvas (FigureCanvasFltkAgg = customised fltk.Widget)
"""
def __init__(self, canvas, figman):
#xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
#height, width = 50, xmax-xmin
self.canvas = canvas
self.figman = figman
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bLeft = FLTKButton(
text="Left", file="stock_left.ppm",
command=pan,argument=(self,1,'x'),type="repeat")
self.bRight = FLTKButton(
text="Right", file="stock_right.ppm",
command=pan,argument=(self,-1,'x'),type="repeat")
self.bZoomInX = FLTKButton(
text="ZoomInX",file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'x'),type="repeat")
self.bZoomOutX = FLTKButton(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'x'),type="repeat")
self.bUp = FLTKButton(
text="Up", file="stock_up.ppm",
command=pan,argument=(self,1,'y'),type="repeat")
self.bDown = FLTKButton(
text="Down", file="stock_down.ppm",
command=pan,argument=(self,-1,'y'),type="repeat")
self.bZoomInY = FLTKButton(
text="ZoomInY", file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'y'),type="repeat")
self.bZoomOutY = FLTKButton(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'y'),type="repeat")
self.bSave = FLTKButton(
text="Save", file="stock_save_as.ppm",
command=save_figure, argument=self)
self._group.end()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
def pan(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.panx(direction)
else:
a.pany(direction)
base.figman.show()
def zoom(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.zoomx(direction)
else:
a.zoomy(direction)
base.figman.show()
def save_figure(ptr,base):
filetypes = base.canvas.get_supported_filetypes()
default_filetype = base.canvas.get_default_filetype()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
selected_filter = 0
filters = []
for i, (ext, name) in enumerate(sorted_filetypes):
filter = '%s (*.%s)' % (name, ext)
filters.append(filter)
if ext == default_filetype:
selected_filter = i
filters = '\t'.join(filters)
file_chooser=base._fc
file_chooser.filter(filters)
file_chooser.filter_value(selected_filter)
file_chooser.show()
while file_chooser.visible() :
Fltk.Fl.wait()
fname=None
if(file_chooser.count() and file_chooser.value(0) != None):
fname=""
(status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
if fname is None: # Cancel
return
#start from last directory
lastDir = os.path.dirname(fname)
file_chooser.directory(lastDir)
format = sorted_filetypes[file_chooser.filter_value()][0]
try:
base.canvas.print_figure(fname, format=format)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
"""
    Public attributes
canvas - the FigureCanvas
figman - the Figure manager
"""
def __init__(self, canvas, figman):
self.canvas = canvas
self.figman = figman
NavigationToolbar2.__init__(self, canvas)
self.pan_selected=False
self.zoom_selected=False
def set_cursor(self, cursor):
Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
def dynamic_update(self):
self.canvas.draw()
def pan(self,*args):
self.pan_selected=not self.pan_selected
self.zoom_selected = False
self.canvas.canvas._draw_overlay= False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.pan(self,args)
def zoom(self,*args):
self.zoom_selected=not self.zoom_selected
self.canvas.canvas._draw_overlay=self.zoom_selected
self.pan_selected = False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.zoom(self,args)
def configure_subplots(self,*args):
window = Fltk.Fl_Double_Window(100,100,480,240)
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasFltkAgg(toolfig)
window.end()
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
window.show()
canvas.show()
def _init_toolbar(self):
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = self.canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bHome = FLTKButton(
text="Home", file="home.ppm",
command=self.home,argument=self)
self.bBack = FLTKButton(
text="Back", file="back.ppm",
command=self.back,argument=self)
self.bForward = FLTKButton(
text="Forward", file="forward.ppm",
command=self.forward,argument=self)
self.bPan = FLTKButton(
text="Pan/Zoom",file="move.ppm",
command=self.pan,argument=self,type="pushed")
self.bZoom = FLTKButton(
text="Zoom to rectangle",file="zoom_to_rect.ppm",
command=self.zoom,argument=self,type="pushed")
self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots,argument=self,type="pushed")
self.bSave = FLTKButton(
text="Save", file="filesave.ppm",
command=save_figure, argument=self)
self._group.end()
self.message = Fltk.Fl_Output(0,0,w,8)
self._group.add_resizable(self.message)
self.update()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def set_message(self, s):
self.message.value(s)
FigureManager = FigureManagerFltkAgg
| gpl-3.0 |
updownlife/multipleK | dependencies/biopython-1.65/Bio/Phylo/_utils.py | 2 | 20914 | # Copyright (C) 2009 by Eric Talevich ([email protected])
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Utilities for handling, displaying and exporting Phylo trees.
Third-party libraries are loaded when the corresponding function is called.
"""
__docformat__ = "restructuredtext en"
import math
import sys
def to_networkx(tree):
"""Convert a Tree object to a networkx graph.
The result is useful for graph-oriented analysis, and also interactive
plotting with pylab, matplotlib or pygraphviz, though the resulting diagram
is usually not ideal for displaying a phylogeny.
Requires NetworkX version 0.99 or later.
"""
try:
import networkx
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NetworkX if you want to use to_networkx.")
# NB (1/2010): the networkx API stabilized at v.1.0
# 1.0+: edges accept arbitrary data as kwargs, weights are floats
# 0.99: edges accept weight as a string, nothing else
# pre-0.99: edges accept no additional data
# Ubuntu Lucid LTS uses v0.99, let's support everything
if networkx.__version__ >= '1.0':
def add_edge(graph, n1, n2):
graph.add_edge(n1, n2, weight=n2.branch_length or 1.0)
# Copy branch color value as hex, if available
if hasattr(n2, 'color') and n2.color is not None:
graph[n1][n2]['color'] = n2.color.to_hex()
elif hasattr(n1, 'color') and n1.color is not None:
# Cascading color attributes
graph[n1][n2]['color'] = n1.color.to_hex()
n2.color = n1.color
# Copy branch weight value (float) if available
if hasattr(n2, 'width') and n2.width is not None:
graph[n1][n2]['width'] = n2.width
elif hasattr(n1, 'width') and n1.width is not None:
# Cascading width attributes
graph[n1][n2]['width'] = n1.width
n2.width = n1.width
elif networkx.__version__ >= '0.99':
def add_edge(graph, n1, n2):
graph.add_edge(n1, n2, (n2.branch_length or 1.0))
else:
def add_edge(graph, n1, n2):
graph.add_edge(n1, n2)
def build_subgraph(graph, top):
"""Walk down the Tree, building graphs, edges and nodes."""
for clade in top:
graph.add_node(clade.root)
add_edge(graph, top.root, clade.root)
build_subgraph(graph, clade)
if tree.rooted:
G = networkx.DiGraph()
else:
G = networkx.Graph()
G.add_node(tree.root)
build_subgraph(G, tree.root)
return G
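# Hedged usage sketch (the tree file name is a placeholder):
#   >>> from Bio import Phylo
#   >>> tree = Phylo.read('my_tree.xml', 'phyloxml')
#   >>> G = to_networkx(tree)
#   >>> len(G) == len(list(tree.find_clades()))   # one graph node per clade
#   True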
def draw_graphviz(tree, label_func=str, prog='twopi', args='',
node_color='#c0deff', **kwargs):
"""Display a tree or clade as a graph, using the graphviz engine.
Requires NetworkX, matplotlib, Graphviz and either PyGraphviz or pydot.
The third and fourth parameters apply to Graphviz, and the remaining
arbitrary keyword arguments are passed directly to networkx.draw(), which
in turn mostly wraps matplotlib/pylab. See the documentation for Graphviz
and networkx for detailed explanations.
The NetworkX/matplotlib parameters are described in the docstrings for
networkx.draw() and pylab.scatter(), but the most reasonable options to try
are: *alpha, node_color, node_size, node_shape, edge_color, style,
font_size, font_color, font_weight, font_family*
:Parameters:
label_func : callable
A function to extract a label from a node. By default this is str(),
but you can use a different function to select another string
associated with each node. If this function returns None for a node,
no label will be shown for that node.
            The label will also be silently skipped if it throws an exception
related to ordinary attribute access (LookupError, AttributeError,
ValueError); all other exception types will still be raised. This
means you can use a lambda expression that simply attempts to look
up the desired value without checking if the intermediate attributes
are available:
>>> Phylo.draw_graphviz(tree, lambda n: n.taxonomies[0].code)
prog : string
The Graphviz program to use when rendering the graph. 'twopi'
behaves the best for large graphs, reliably avoiding crossing edges,
but for moderate graphs 'neato' looks a bit nicer. For small
directed graphs, 'dot' may produce a normal-looking cladogram, but
will cross and distort edges in larger graphs. (The programs 'circo'
and 'fdp' are not recommended.)
args : string
Options passed to the external graphviz program. Normally not
needed, but offered here for completeness.
Example
-------
>>> import pylab
>>> from Bio import Phylo
>>> tree = Phylo.read('ex/apaf.xml', 'phyloxml')
>>> Phylo.draw_graphviz(tree)
>>> pylab.show()
>>> pylab.savefig('apaf.png')
"""
try:
import networkx
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NetworkX if you want to use to_networkx.")
G = to_networkx(tree)
try:
# NetworkX version 1.8 or later (2013-01-20)
Gi = networkx.convert_node_labels_to_integers(G,
label_attribute='label')
int_labels = {}
for integer, nodeattrs in Gi.node.items():
int_labels[nodeattrs['label']] = integer
except TypeError:
# Older NetworkX versions (before 1.8)
Gi = networkx.convert_node_labels_to_integers(G,
discard_old_labels=False)
int_labels = Gi.node_labels
try:
posi = networkx.graphviz_layout(Gi, prog, args=args)
except ImportError:
raise MissingPythonDependencyError(
"Install PyGraphviz or pydot if you want to use draw_graphviz.")
def get_label_mapping(G, selection):
"""Apply the user-specified node relabeling."""
for node in G.nodes():
if (selection is None) or (node in selection):
try:
label = label_func(node)
if label not in (None, node.__class__.__name__):
yield (node, label)
except (LookupError, AttributeError, ValueError):
pass
if 'nodelist' in kwargs:
labels = dict(get_label_mapping(G, set(kwargs['nodelist'])))
else:
labels = dict(get_label_mapping(G, None))
kwargs['nodelist'] = list(labels.keys())
if 'edge_color' not in kwargs:
kwargs['edge_color'] = [isinstance(e[2], dict) and
e[2].get('color', 'k') or 'k'
for e in G.edges(data=True)]
if 'width' not in kwargs:
kwargs['width'] = [isinstance(e[2], dict) and
e[2].get('width', 1.0) or 1.0
for e in G.edges(data=True)]
posn = dict((n, posi[int_labels[n]]) for n in G)
networkx.draw(G, posn, labels=labels, node_color=node_color, **kwargs)
def draw_ascii(tree, file=None, column_width=80):
"""Draw an ascii-art phylogram of the given tree.
The printed result looks like::
_________ Orange
______________|
| |______________ Tangerine
______________|
| | _________________________ Grapefruit
_| |_________|
| |______________ Pummelo
|
|__________________________________ Apple
:Parameters:
file : file-like object
File handle opened for writing the output drawing. (Default:
standard output)
column_width : int
Total number of text columns used by the drawing.
"""
if file is None:
file = sys.stdout
taxa = tree.get_terminals()
# Some constants for the drawing calculations
max_label_width = max(len(str(taxon)) for taxon in taxa)
drawing_width = column_width - max_label_width - 1
drawing_height = 2 * len(taxa) - 1
def get_col_positions(tree):
"""Create a mapping of each clade to its column position."""
depths = tree.depths()
# If there are no branch lengths, assume unit branch lengths
if not max(depths.values()):
depths = tree.depths(unit_branch_lengths=True)
# Potential drawing overflow due to rounding -- 1 char per tree layer
fudge_margin = int(math.ceil(math.log(len(taxa), 2)))
cols_per_branch_unit = ((drawing_width - fudge_margin)
/ float(max(depths.values())))
return dict((clade, int(blen * cols_per_branch_unit + 1.0))
for clade, blen in depths.items())
def get_row_positions(tree):
positions = dict((taxon, 2 * idx) for idx, taxon in enumerate(taxa))
def calc_row(clade):
for subclade in clade:
if subclade not in positions:
calc_row(subclade)
positions[clade] = ((positions[clade.clades[0]] +
positions[clade.clades[-1]]) // 2)
calc_row(tree.root)
return positions
col_positions = get_col_positions(tree)
row_positions = get_row_positions(tree)
char_matrix = [[' ' for x in range(drawing_width)]
for y in range(drawing_height)]
def draw_clade(clade, startcol):
thiscol = col_positions[clade]
thisrow = row_positions[clade]
# Draw a horizontal line
for col in range(startcol, thiscol):
char_matrix[thisrow][col] = '_'
if clade.clades:
# Draw a vertical line
toprow = row_positions[clade.clades[0]]
botrow = row_positions[clade.clades[-1]]
for row in range(toprow + 1, botrow + 1):
char_matrix[row][thiscol] = '|'
# NB: Short terminal branches need something to stop rstrip()
if (col_positions[clade.clades[0]] - thiscol) < 2:
char_matrix[toprow][thiscol] = ','
# Draw descendents
for child in clade:
draw_clade(child, thiscol + 1)
draw_clade(tree.root, 0)
# Print the complete drawing
for idx, row in enumerate(char_matrix):
line = ''.join(row).rstrip()
# Add labels for terminal taxa in the right margin
if idx % 2 == 0:
line += ' ' + str(taxa[idx // 2])
file.write(line + '\n')
file.write('\n')
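# Hedged usage sketch (the tree file name is a placeholder):
#   >>> from Bio import Phylo
#   >>> tree = Phylo.read('my_tree.nwk', 'newick')
#   >>> draw_ascii(tree)                          # phylogram printed to stdout
#   >>> with open('tree.txt', 'w') as handle:
#   ...     draw_ascii(tree, file=handle, column_width=60)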
def draw(tree, label_func=str, do_show=True, show_confidence=True,
# For power users
axes=None, branch_labels=None, *args, **kwargs):
"""Plot the given tree using matplotlib (or pylab).
The graphic is a rooted tree, drawn with roughly the same algorithm as
draw_ascii.
Additional keyword arguments passed into this function are used as pyplot
options. The input format should be in the form of:
pyplot_option_name=(tuple), pyplot_option_name=(tuple, dict), or
pyplot_option_name=(dict).
Example using the pyplot options 'axhspan' and 'axvline':
>>> Phylo.draw(tree, axhspan=((0.25, 7.75), {'facecolor':'0.5'}),
... axvline={'x':'0', 'ymin':'0', 'ymax':'1'})
Visual aspects of the plot can also be modified using pyplot's own functions
and objects (via pylab or matplotlib). In particular, the pyplot.rcParams
object can be used to scale the font size (rcParams["font.size"]) and line
width (rcParams["lines.linewidth"]).
:Parameters:
label_func : callable
A function to extract a label from a node. By default this is str(),
but you can use a different function to select another string
associated with each node. If this function returns None for a node,
no label will be shown for that node.
do_show : bool
Whether to show() the plot automatically.
show_confidence : bool
Whether to display confidence values, if present on the tree.
axes : matplotlib/pylab axes
If a valid matplotlib.axes.Axes instance, the phylogram is plotted
in that Axes. By default (None), a new figure is created.
branch_labels : dict or callable
A mapping of each clade to the label that will be shown along the
branch leading to it. By default this is the confidence value(s) of
the clade, taken from the ``confidence`` attribute, and can be
easily toggled off with this function's ``show_confidence`` option.
But if you would like to alter the formatting of confidence values,
or label the branches with something other than confidence, then use
this option.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
try:
import pylab as plt
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install matplotlib or pylab if you want to use draw.")
import matplotlib.collections as mpcollections
# Arrays that store lines for the plot of clades
horizontal_linecollections = []
vertical_linecollections = []
# Options for displaying branch labels / confidence
def conf2str(conf):
if int(conf) == conf:
return str(int(conf))
return str(conf)
if not branch_labels:
if show_confidence:
def format_branch_label(clade):
if hasattr(clade, 'confidences'):
# phyloXML supports multiple confidences
return '/'.join(conf2str(cnf.value)
for cnf in clade.confidences)
if clade.confidence:
return conf2str(clade.confidence)
return None
else:
def format_branch_label(clade):
return None
elif isinstance(branch_labels, dict):
def format_branch_label(clade):
return branch_labels.get(clade)
else:
assert callable(branch_labels), \
"branch_labels must be either a dict or a callable (function)"
format_branch_label = branch_labels
# Layout
def get_x_positions(tree):
"""Create a mapping of each clade to its horizontal position.
Dict of {clade: x-coord}
"""
depths = tree.depths()
# If there are no branch lengths, assume unit branch lengths
if not max(depths.values()):
depths = tree.depths(unit_branch_lengths=True)
return depths
def get_y_positions(tree):
"""Create a mapping of each clade to its vertical position.
Dict of {clade: y-coord}.
Coordinates are negative, and integers for tips.
"""
maxheight = tree.count_terminals()
# Rows are defined by the tips
heights = dict((tip, maxheight - i)
for i, tip in enumerate(reversed(tree.get_terminals())))
# Internal nodes: place at midpoint of children
def calc_row(clade):
for subclade in clade:
if subclade not in heights:
calc_row(subclade)
# Closure over heights
heights[clade] = (heights[clade.clades[0]] +
heights[clade.clades[-1]]) / 2.0
if tree.root.clades:
calc_row(tree.root)
return heights
x_posns = get_x_positions(tree)
y_posns = get_y_positions(tree)
# The function draw_clade closes over the axes object
if axes is None:
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
elif not isinstance(axes, plt.matplotlib.axes.Axes):
raise ValueError("Invalid argument for axes: %s" % axes)
def draw_clade_lines(use_linecollection=False, orientation='horizontal',
y_here=0, x_start=0, x_here=0, y_bot=0, y_top=0,
color='black', lw='.1'):
"""Create a line with or without a line collection object.
Graphical formatting of the lines representing clades in the plot can be
customized by altering this function.
"""
if (use_linecollection is False and orientation == 'horizontal'):
axes.hlines(y_here, x_start, x_here, color=color, lw=lw)
elif (use_linecollection is True and orientation == 'horizontal'):
horizontal_linecollections.append(mpcollections.LineCollection(
[[(x_start, y_here), (x_here, y_here)]], color=color, lw=lw),)
elif (use_linecollection is False and orientation == 'vertical'):
axes.vlines(x_here, y_bot, y_top, color=color)
elif (use_linecollection is True and orientation == 'vertical'):
vertical_linecollections.append(mpcollections.LineCollection(
[[(x_here, y_bot), (x_here, y_top)]], color=color, lw=lw),)
def draw_clade(clade, x_start, color, lw):
"""Recursively draw a tree, down from the given clade."""
x_here = x_posns[clade]
y_here = y_posns[clade]
# phyloXML-only graphics annotations
if hasattr(clade, 'color') and clade.color is not None:
color = clade.color.to_hex()
if hasattr(clade, 'width') and clade.width is not None:
lw = clade.width * plt.rcParams['lines.linewidth']
# Draw a horizontal line from start to here
draw_clade_lines(use_linecollection=True, orientation='horizontal',
y_here=y_here, x_start=x_start, x_here=x_here, color=color, lw=lw)
# Add node/taxon labels
label = label_func(clade)
if label not in (None, clade.__class__.__name__):
axes.text(x_here, y_here, ' %s' %
label, verticalalignment='center')
# Add label above the branch (optional)
conf_label = format_branch_label(clade)
if conf_label:
axes.text(0.5 * (x_start + x_here), y_here, conf_label,
fontsize='small', horizontalalignment='center')
if clade.clades:
# Draw a vertical line connecting all children
y_top = y_posns[clade.clades[0]]
y_bot = y_posns[clade.clades[-1]]
# Only apply widths to horizontal lines, like Archaeopteryx
draw_clade_lines(use_linecollection=True, orientation='vertical',
x_here=x_here, y_bot=y_bot, y_top=y_top, color=color, lw=lw)
# Draw descendents
for child in clade:
draw_clade(child, x_here, color, lw)
draw_clade(tree.root, 0, 'k', plt.rcParams['lines.linewidth'])
# If line collections were used to create clade lines, here they are added
# to the pyplot plot.
for i in horizontal_linecollections:
axes.add_collection(i)
for i in vertical_linecollections:
axes.add_collection(i)
# Aesthetics
if hasattr(tree, 'name') and tree.name:
axes.set_title(tree.name)
axes.set_xlabel('branch length')
axes.set_ylabel('taxa')
# Add margins around the tree to prevent overlapping the axes
xmax = max(x_posns.values())
axes.set_xlim(-0.05 * xmax, 1.25 * xmax)
# Also invert the y-axis (origin at the top)
# Add a small vertical margin, but avoid including 0 and N+1 on the y axis
axes.set_ylim(max(y_posns.values()) + 0.8, 0.2)
# Parse and process key word arguments as pyplot options
for key, value in kwargs.items():
try:
# Check that the pyplot option input is iterable, as required
[i for i in value]
except TypeError:
raise ValueError('Keyword argument "%s=%s" is not in the format '
'pyplot_option_name=(tuple), pyplot_option_name=(tuple, dict),'
' or pyplot_option_name=(dict) '
% (key, value))
if isinstance(value, dict):
getattr(plt, str(key))(**dict(value))
elif not (isinstance(value[0], tuple)):
getattr(plt, str(key))(*value)
elif (isinstance(value[0], tuple)):
getattr(plt, str(key))(*value[0], **dict(value[1]))
if do_show:
plt.show()
| gpl-2.0 |
floriangeigl/DynamicNetworkViz | example_graph_activity.py | 1 | 2526 | __author__ = 'Florian'
from dyn_net_viz import graph_viz
import datetime
import pandas as pd
from graph_tool.all import *
def main():
# create an undirected tree
second_level_nodes = 10
second_level_leaves = 2
g = Graph(directed=False)
vertices = [g.add_vertex() for i in range(second_level_nodes + 1)]
root_vertex, second_level = vertices[0], vertices[1:]
third_level = []
for v2 in second_level:
g.add_edge(root_vertex, v2)
        # add leaves to each second-level vertex
for i in range(second_level_leaves):
v = g.add_vertex()
g.add_edge(v2, v)
third_level.append(v)
g.add_edge(v2, g.add_vertex())
print g
# init all nodes with activity = 0
data = []
iteration = 0
for v in g.vertices():
data.append((iteration, v, 0))
for i in range(3):
# set activity of root vertex to 100
iteration += 1
data.append((iteration, root_vertex, 100))
# set activity of all second-level vertices to 100, reduce root vertex activity to 50
iteration += 1
for v in second_level:
data.append((iteration, v, 100))
data.append((iteration, root_vertex, 50))
# set activity of all third-level vertices to 100, reduce second-level vertices activity to 50, set root vertex activity to 0
iteration += 1
for v in third_level:
data.append((iteration, v, 100))
for v in second_level:
data.append((iteration, v, 50))
data.append((iteration, root_vertex, 0))
# and so forth and so on
iteration += 1
for v in third_level:
data.append((iteration, v, 50))
for v in second_level:
data.append((iteration, v, 0))
iteration += 1
for v in third_level:
data.append((iteration, v, 0))
# create the dataframe
df = pd.DataFrame(columns=['iteration', 'vertex', 'activity'], data=data)
#create graph-viz-obj
gv = graph_viz(df, g, filename='example_activ_output/activity_dynamics', df_iteration_key='iteration',
df_vertex_key='vertex', df_state_key='activity', ips=1, smoothing=10, max_node_alpha=1.0,
output_size=(800, 600), edge_blending=True)
gv.plot_network_evolution()
if __name__ == '__main__':
start = datetime.datetime.now()
main()
print '============================='
print 'All done. Overall Time:', str(datetime.datetime.now() - start) | apache-2.0 |
manjunaths/tensorflow | tensorflow/examples/learn/iris_run_config.py | 86 | 2087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # You can define your own configurations by providing a RunConfig object to
  # the estimator to control session configurations, e.g. num_cores
  # and gpu_memory_fraction
run_config = tf.contrib.learn.estimators.RunConfig(
num_cores=3, gpu_memory_fraction=0.6)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
config=run_config)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 5 | 40857 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.python.ops import math_ops
class DNNClassifierTest(tf.test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.DNNClassifier)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100], dtype=tf.int32)
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [0.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predictions, [1, 0, 0])
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[50], [20], [10]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
labels = tf.constant([[0.8], [0.], [0.2]], dtype=tf.float32)
return features, labels
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predictions_proba))
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predictions))
for b in range(3):
self.assertEqual(2, len(predictions_proba[b]))
for i in range(2):
self._assertInRange(0.0, 1.0, predictions_proba[b][i])
self.assertTrue(predictions[b] in (0, 1))
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150], dtype=tf.int32)
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = tf.contrib.learn.datasets.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, labels
classifier = tf.contrib.learn.DNNClassifier(
n_classes=2,
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = tf.contrib.learn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = tf.constant([[1], [1], [1], [1]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = tf.contrib.learn.DNNClassifier(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1)
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertListEqual(list(predictions), [1, 0, 0])
predictions = classifier.predict_proba(input_fn=_input_fn,
as_iterable=False)
self.assertAllClose(
predictions, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.1)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predictions, [1, 0, 0])
predictions = list(
classifier.predict_proba(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(
predictions, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = tf.to_float(labels)
predictions = tf.slice(predictions, [0, 1], [-1, 1])
labels = math_ops.cast(labels, predictions.dtype)
return tf.reduce_sum(tf.mul(predictions, labels))
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='classes'),
'my_precision': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key='classes'),
'my_metric': MetricSpec(
metric_fn=_my_metric_op,
prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
]).issubset(set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict(input_fn=predict_input_fn)))
self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_auc,
prediction_key='bad_type')})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = tf.contrib.learn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict(input_fn=predict_input_fn)
del classifier
classifier2 = tf.contrib.learn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1)
]
tf_config = {
'cluster': {
tf.contrib.learn.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with tf.test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig(tf_random_seed=5)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = tf.train.ClusterSpec({})
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
tf.contrib.layers.real_valued_column('age'),
tf.contrib.layers.embedding_column(language, dimension=1)
]
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100], dtype=tf.int32)
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, labels
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = tf.contrib.learn.DNNRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = tf.constant([[1.], [1.], [1.], [1.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = tf.contrib.learn.DNNRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(labels, dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predictions, atol=0.2)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(labels, dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predictions, atol=0.2)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return tf.reduce_sum(tf.mul(predictions, labels))
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': tf.contrib.metrics.streaming_mean_squared_error,
'my_metric': _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests that when the key is a tuple, an error is raised.
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={('my_error', 'predictions'):
tf.contrib.metrics.streaming_mean_squared_error})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = tf.contrib.learn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict(input_fn=predict_input_fn))
del regressor
regressor2 = tf.contrib.learn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
predictions2 = list(regressor2.predict(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
tf_config = {
'cluster': {
tf.contrib.learn.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with tf.test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = tf.train.ClusterSpec({})
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = tf.contrib.learn.datasets.load_boston()
features = tf.cast(tf.reshape(tf.constant(boston.data), [-1, 13]), tf.float32)
labels = tf.cast(tf.reshape(tf.constant(boston.target), [-1, 1]), tf.float32)
return features, labels
class FeatureColumnTest(tf.test.TestCase):
def testTrain(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
tf.test.main()
| agpl-3.0 |
ngoix/OCRF | examples/decomposition/plot_image_denoising.py | 70 | 6249 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image, first using online :ref:`DictionaryLearning` and then
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is, in addition, closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
###############################################################################
try:
from scipy import misc
face = misc.face(gray=True)
except AttributeError:
# Old versions of scipy have face in the top level package
face = sp.face(gray=True)
# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255
# downsample for higher speed
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
height, width = face.shape
# Distort the right half of the image
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = face.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
patches, (height, width // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], face,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
ddofer/ProFET | ProFET/feat_extract/PipeTasks.py | 1 | 20172 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 8 18:24:07 2014
@author: Dan
Common tasks in processing data and models, likely to be called as part of the pipeline.
"""
from sys import argv
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier,GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier, RandomizedLogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC,LinearSVC,NuSVC
from sklearn.naive_bayes import GaussianNB
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold,cross_val_score,StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import RFE, RFECV, SelectFdr,f_classif,SelectFwe,SelectPercentile,SelectKBest
from sklearn.linear_model import RandomizedLogisticRegression
from sklearn.lda import LDA
from sklearn.decomposition import PCA,FastICA #,TruncatedSVD
from operator import itemgetter
from collections import Counter
from Model_trainer import load_data
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support,matthews_corrcoef, classification_report
#http://tokestermw.github.io/posts/imbalanced-datasets-random-forests/
# from sklearn.preprocessing import balance_weights
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
'''
https://pythonhosted.org/nolearn/_modules/nolearn/model.html#AveragingEstimator
Gets the majority vote for ensemble of classifiers
Good enhanced confusion matrix + weights for RF:
http://stackoverflow.com/questions/24123498/recursive-feature-elimination-on-random-forest-using-scikit-learn
'''
'Use SKLL? https://skll.readthedocs.org/en/latest/run_experiment.html#quick-example'
def balance_weights(y):
"""
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_weights.py
http://stackoverflow.com/questions/20082674/unbalanced-classification-using-randomforestclassifier-in-sklearn
Compute sample weights such that the class distribution of y becomes
balanced.
Parameters
----------
y : array-like
Labels for the samples.
Returns
-------
weights : array-like
The sample weights.
"""
y = np.asarray(y)
y = np.searchsorted(np.unique(y), y)
bins = np.bincount(y)
weights = 1. / bins.take(y)
weights *= bins.min()
return weights
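# Added illustrative sketch (not in the original module): demonstrates the weighting
# behaviour of balance_weights on a made-up label vector before passing it as
# sample_weight to an estimator's fit().
def _demo_balance_weights():
    '''Sketch only: with four samples of class 0 and one of class 1, the minority
    sample keeps weight 1.0 while each majority sample is downweighted to 0.25.'''
    example_y = np.array([0, 0, 0, 0, 1])
    print(balance_weights(example_y))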
'TODO: Implement. (More plots and test figures in source)'
def Feature_Importance_plot (est,names):
'http://nbviewer.ipython.org/github/pprett/pydata-gbrt-tutorial/blob/master/gbrt-tutorial.ipynb'
fx_imp = pd.Series(est.feature_importances_, index=names)
fx_imp /= fx_imp.max() # normalize
fx_imp.sort()
    fx_imp.plot(kind='barh', figsize=(10, 8))  # fixed size; the original referenced an undefined FIGSIZE constant
def createImportancePlot(splt,desc,importances,caption):
'''
http://nbviewer.ipython.org/gist/greglandrum/4316460
'''
    import numpy as na  # numpy.numarray has been removed from numpy; plain numpy provides the same array()
labels = []
weights = []
    threshold = sorted([abs(w) for w in importances])[-11]
for d in zip(desc,importances):
if abs(d[1]) > threshold:
labels.append(d[0])
weights.append(d[1])
xlocations = na.array(range(len(labels)))+0.5
width = 0.8
splt.bar(xlocations, weights, width=width)
splt.set_xticks([r+1 for r in range(len(labels))])
splt.set_xticklabels(labels)
splt.set_xlim(0, xlocations[-1]+width*2)
splt.set_title(caption)
splt.get_xaxis().tick_bottom()
splt.get_yaxis().tick_left()
def get_enhanced_confusion_matrix(actuals, predictions, labels):
""""
Enhances confusion_matrix by adding sensivity and specificity metrics
http://stackoverflow.com/questions/24123498/recursive-feature-elimination-on-random-forest-using-scikit-learn
"""
cm = confusion_matrix(actuals, predictions, labels = labels)
sensitivity = float(cm[1][1]) / float(cm[1][0]+cm[1][1])
specificity = float(cm[0][0]) / float(cm[0][0]+cm[0][1])
weightedAccuracy = (sensitivity * 0.9) + (specificity * 0.1)
return cm, sensitivity, specificity, weightedAccuracy
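# Added illustrative sketch (not in the original module): a toy binary example showing
# the extra metrics returned alongside the confusion matrix.
def _demo_enhanced_confusion_matrix():
    '''Sketch only: the actuals/predictions below are made up for demonstration.'''
    actuals = [0, 0, 1, 1]
    predictions = [0, 1, 1, 1]
    cm, sens, spec, w_acc = get_enhanced_confusion_matrix(actuals, predictions,
                                                          labels=[0, 1])
    # cm == [[1, 1], [0, 2]]; sensitivity 1.0, specificity 0.5, weighted accuracy 0.95
    print(cm, sens, spec, w_acc)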
def Get_yPred (X,y,clf_class,n_folds=10, pred_proba=False) : #,**kwargs):
'''
Return "Full" Y_predictions from a given c;assifier (not just from one split): (From def run_cv)
http://blog.yhathq.com/posts/predicting-customer-churn-with-sklearn.html
Could also be done with stratified shuffle split (+Append output) ?
http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html
'''
# Construct a kfolds object
# kf = StratifiedKFold(len(y),n_folds,shuffle=True) #shuffle?
kf = StratifiedKFold(y,n_folds,shuffle=True) #shuffle?
    y_pred = y.copy().astype(float) if pred_proba else y.copy()
# Iterate through folds
for train_index, test_index in kf:
X_train, X_test = X[train_index], X[test_index]
y_train = y[train_index]
# sample_weight=balance_weights(y_train)
# Initialize a classifier with key word arguments
clf = clf_class #(**kwargs)
#sample_weight weighting not working here.. ? TODO
clf.fit(X_train,y_train) #,sample_weight) #
        if pred_proba == True:
            # Stores only the positive-class probability, so this path assumes binary labels
            y_pred[test_index] = clf.predict_proba(X_test)[:, 1]
else:
y_pred[test_index] = clf.predict(X_test)
return y_pred
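# Added illustrative sketch (not in the original module): obtain out-of-fold predictions
# on a synthetic dataset and summarise them; the data below is made up for demonstration.
def _demo_get_ypred():
    '''Sketch only: full cross-validated predictions from a random forest.'''
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=300, n_features=10,
                                         n_informative=4, random_state=0)
    y_hat = Get_yPred(X_demo, y_demo, RandomForestClassifier(n_estimators=50), n_folds=5)
    print(classification_report(y_demo, y_hat))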
'TODO. Partic featnames, and mrmr?'
'Code near duplicate of that in Model_train.py - def featureFitting'
'Currently works on the file, rather than dataframe/inmemory'
def GetKFeatures(filename, method='RFE',kbest=30,alpha=0.01, reduceMatrix = True):
'''
Gets best features using chosen method
(K-best, RFE, RFECV,'L1' (RandomizedLogisticRegression),'Tree' (ExtraTreesClassifier), mrmr),
then prints top K features' names (from featNames).
If reduceMatrix = True, then also returns X reduced to the K best features.
Available methods' names are: 'RFE','RFECV','RandomizedLogisticRegression','K-best','ExtraTreesClassifier'..
    Note that, effectively, any scikit-learn selection method could be used here, if correctly imported.
'''
#est = method()
'''
Gets the K-best features (filtered by FDR, then select best ranked by t-test , more advanced options can be implemented).
Save the data/matrix with the resulting/kept features to a new output file, "REDUCED_Feat.csv"
'''
features, labels, lb_encoder,featureNames = load_data(filename)
X, y = features, labels
# change the names as ints back to strings
class_names=lb_encoder.inverse_transform(y)
print("Data and labels imported. PreFilter Feature matrix shape:")
print(X.shape)
selectK = SelectKBest(k=kbest)
selectK.fit(X,y)
selectK_mask=selectK.get_support()
K_featnames = featureNames[selectK_mask]
print('X After K filter:',X.shape)
print("K_featnames: %s" %(K_featnames))
if reduceMatrix ==True :
Reduced_df = pd.read_csv(filename, index_col=0)
Reduced_df = Reduced_df[Reduced_df.columns[selectK_mask]]
Reduced_df.to_csv('REDUCED_Feat.csv')
print('Saved to REDUCED_Feat.csv')
return Reduced_df
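# Added illustrative sketch (not in the original module): the CSV path below is a
# hypothetical placeholder for an extracted feature matrix file.
def _demo_get_k_features():
    '''Sketch only: keep the 30 best features and write them to REDUCED_Feat.csv.'''
    reduced = GetKFeatures('trainingSetFeatures.csv', kbest=30)
    print(reduced.shape)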
#WORKS! But unreadable with too many features!
def PlotFeaturesImportance(X,y,featureNames):
'''
Plot the relative contribution/importance of the features.
Best to reduce to top X features first - for interpretability
Code example from:
http://bugra.github.io/work/notes/2014-11-22/an-introduction-to-supervised-learning-scikit-learn/
'''
gbc = GradientBoostingClassifier(n_estimators=100)
gbc.fit(X, y)
# Get Feature Importance from the classifier
feature_importance = gbc.feature_importances_
# Normalize The Features
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.figure(figsize=(16, 12))
plt.barh(pos, feature_importance[sorted_idx], align='center', color='#7A68A6')
#plt.yticks(pos, np.asanyarray(df.columns.tolist())[sorted_idx]) #ORIG
plt.yticks(pos, np.asanyarray(featureNames)[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Features Importance')
plt.show()
'Works!'
def PlotPerfPercentFeatures(X,y,est=LinearSVC()):
'''
Performance of a classifier (default: SVM-Anova)
varying the percentile of features selected (F-test) .
http://scikit-learn.org/stable/auto_examples/svm/plot_svm_anova.html#example-svm-plot-svm-anova-py
    See Also (similar, but with model selection from among classifiers):
http://nbviewer.ipython.org/github/bugra/pydata-nyc-2014/blob/master/6.%20Scikit%20Learn%20-%20Model%20Selection.ipynb
'''
transform = SelectPercentile(f_classif)
clf = Pipeline([('anova', transform), ('est', est)])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1,2,3,5,7,10,13,15,20,25,33,50,65,75,90, 99)
# percentiles = (1,5,10,25,50,75,90)
for percentile in percentiles:
# print(percentile)
clf.set_params(anova__percentile=percentile)
this_scores = cross_val_score(clf, X, y,cv=StratifiedShuffleSplit(y, n_iter=7, test_size=0.3), n_jobs=-1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
print("Outputting Graph:")
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Predictor Performance, varying percent of features used')
plt.xlabel('Percentile')
plt.ylabel('Prediction Performance')
plt.axis('tight')
plt.show()
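# Added illustrative sketch (not in the original module): runs the percentile-of-features
# performance curve on a synthetic dataset; in the real pipeline X and y come from the
# extracted feature CSV.
def _demo_plot_perf_percent_features():
    '''Sketch only: plot classifier performance against the percentile of features kept.'''
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=200, n_features=50,
                                         n_informative=8, random_state=1)
    PlotPerfPercentFeatures(X_demo, y_demo, est=LinearSVC())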
'NOT tested'
def plotRFECV (X,y,stepSize=0.05,scoring='f1'):
'''
Plot recursive feature elimination example with automatic tuning of the number of features selected with cross-validation.
http://scikit-learn.org/stable/auto_examples/plot_rfe_with_cross_validation.html#example-plot-rfe-with-cross-validation-py
'''
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
# Create the RFE object and compute a cross-validated score.
# svc = SVC(kernel="linear")
svc = SVC(kernel="linear",class_weight='auto', cache_size=1400)
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=stepSize, cv=StratifiedKFold(y, 2),
scoring=scoring)
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
import matplotlib.pyplot as plt
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
return rfecv
def report(grid_scores, n_top=2) :
'''
Print out top models/parameters after a grid search for model params.
'''
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores) :
if n_top>1:
print("Model with rank: {0}".format(i + 1))
print("Average Cross-Validation score (while tuning): {0:.2f} (std: {1:.2f})".format(
score.mean_validation_score, np.std(score.cv_validation_scores)))
print("Model Parameters: {0}".format(score.parameters))
print("")
def ModelParam_GridSearch(X_train, y_train, cv=4,scoreParam = 'f1'):
'''
Basic grid searchCV for multiple classifiers' perf & parameters.
This is very limited and computationally expensive.
Not guaranteed to reach even a local optima, but good to get a
rough idea of parameters for the classifiers. (Does not address pre-processing)
More classifiers can be added as desired, and parameters expanded.
Later: Add options for RBM + Logit; PCA; ICA; LDA.
See also
http://scikit-learn-laboratory.readthedocs.org/en/latest/_modules/skll/learner.html
TODO: Add parameters + put classifiers/"pipeline_#" in a list. (To allow checking only some params)
'''
# pipeline1 = Pipeline('clf', RandomForestClassifier() )
#
# pipeline2 = Pipeline(
# ('clf', KNeighborsClassifier()),)
pipeline1 = RandomForestClassifier(n_jobs=-1)
pipeline2 = KNeighborsClassifier()
pipeline3 = SVC(cache_size=1500)
# pipeline3 = NuSVC(cache_size=1500)
pipeline4 = GaussianNB()
pipeline5 = GradientBoostingClassifier()
pipeline6 = SGDClassifier()
pipeline7 = LogisticRegression()
'RandomForestClassifier:'
parameters1 = {
'n_estimators': [150],
'criterion': ['gini'],
'max_features': ['auto',0.4],
'max_depth': [8,None],
'min_samples_leaf':[1,2],
'min_samples_split':[2,4],
'n_jobs':[-1]
}
#, 'entropy'
# 'n_jobs':[-1]
'KNeighborsClassifier:'
parameters2 = {
'n_neighbors': [7],
'weights': ['distance']
}
'SVC:'
parameters3 = {
'C': [0.01,0.1, 1,10,100],
'kernel': ['linear','rbf'],
'gamma': [0.1,0.0, 1.0,20],
'cache_size':[1500],
'class_weight':['auto'],
}
# , 'poly','sigmoid']
## 'GaussianNB:'
## parameters4 = {}
'GradientBoostingClassifier'
parameters5 = {
'max_depth':[3,5,8],
'n_estimators': [100],
'min_samples_leaf':[1,2],
'learning_rate': [0.1, 0.01],
'max_features': ['auto',0.4]
}
'SGDClassifier:'
parameters6 = {
'alpha': [0.00001,0.001,0.01],
'penalty': ['l1','l2', 'elasticnet'],
'n_iter': [300],
'loss':['hinge'],
'n_jobs':[-1],
'class_weight':['auto']
}
#, 'modified_huber','log'
'LogisticRegression:'
parameters7 = {
'C': [0.001,0.01, 0.1, 1.0,10,100],
'penalty': ['l1','l2'],
'class_weight':['auto']
}
'TODO: make this into a seperate method, with pars, pips passed to it as params'
pars = [parameters1, parameters2, parameters3,parameters5,parameters6,parameters7] #parameters4
pips = [pipeline1, pipeline2, pipeline3,pipeline5,pipeline6,pipeline7] # pipeline4,
print ("Starting Gridsearch To find each model's best parameters")
for i in range(len(pars)):
print(pips[i])
gs = GridSearchCV(estimator=pips[i], param_grid=pars[i],
verbose=0, refit=True, n_jobs=-1,iid=False,
pre_dispatch='2*n_jobs',scoring=scoreParam,
                          fit_params={'sample_weight': balance_weights(y_train)},
cv=StratifiedKFold(y_train,n_folds=cv,shuffle=True))
#Valid scoring options: ['accuracy', 'average_precision', 'f1', 'precision', 'recall', 'roc_auc']
# gs = gs.fit(X_train, y_train)
'http://stackoverflow.com/questions/13051706/scikit-learn-using-sample-weight-in-grid-search?rq=1'
'Note: Remove "class_weight=auto" from the autoweighting classifiers!!'
"Set Class weights (then into sample weights: https://github.com/scikit-learn/scikit-learn/blob/8dab222cfe894126dfb67832da2f4e871b87bce7/sklearn/utils/class_weight.py"
gs.fit(X_train, y_train)
#print ("Finished Gridsearch")
#print (gs.best_score_)
report(gs.grid_scores_)
# http://stackoverflow.com/questions/18210799/scikit-learn-sample-try-out-with-my-classifier-and-data
'Get more exhaustive CV results with the best tuned parameters for the model'
est = gs.best_estimator_
scores = cross_val_score(est, X_train,
y_train,
cv=StratifiedShuffleSplit(y=y_train, n_iter=10, test_size=0.2),scoring=scoreParam,
n_jobs=-1, pre_dispatch='1.8*n_jobs')
print("Tuned Model's %s Score: %0.3f (+/- %0.3f)" % (scoreParam,scores.mean(), scores.std() * 2))
if __name__ == '__main__' :
'TODO: Allow user to select desired function - CV model, or feature reduction'
'TODO: Use os.path.join - for file names/locations/dirs..'
#Set by user input:
fileName = r'/trainingSetFeatures.csv'
filePath = str(argv[1])
X, y, lb_encoder,featureNames = load_data(filePath+fileName, 'file') # X, y = features, labels
print(X.shape,"= (samples, features)")
y_inv = Counter(lb_encoder.inverse_transform(y))
print("Classes:", y_inv)
# 'Normalize/Scale features if needed. Our data is standardized by default'
# X = StandardScaler(copy=False).fit_transform(X)
Fwe = SelectFwe(alpha=0.01).fit(X,y)
X=Fwe.transform(X)
featureNames=featureNames[Fwe.get_support()]
print("F-test filter ->",X.shape)
FeatSelection_SVM=True
FeatSelection_RandLogReg=False
if FeatSelection_RandLogReg == True:
LogRegFeats = RandomizedLogisticRegression(C=5, scaling=0.5,
sample_fraction=0.8, n_resampling=60, selection_threshold=0.2,n_jobs=-1)
X = LogRegFeats.fit_transform(X,y)
featureNames=featureNames[LogRegFeats.get_support()]
print("RandomizedLogisticRegression Feature Selection ->:",X.shape)
    elif FeatSelection_SVM == True:
        svc_l1 = LinearSVC(C=1, penalty="l1", dual=False, class_weight='auto').fit(X, y)
        # X= LogisticRegression(C=0.01,class_weight='auto').fit_transform(X, y)
        svc_mask = np.any(svc_l1.coef_ != 0, axis=0)   # keep features with a non-zero L1 coefficient
        X = X[:, svc_mask]
        featureNames = featureNames[svc_mask]
        print ("SVC Transformed X:",X.shape)
'''
print("Plot #Feats vs Classification performance:")
PlotPerfPercentFeatures(X_LR,y,est=SVC(C=100))
'''
KFilt=None
# KFilt=200
if KFilt is not None:
k = SelectKBest(k=KFilt).fit(X,y)
X=k.transform(X)
featureNames=featureNames[k.get_support()]
print("X reduced to K best features: ",X.shape)
print("Performance as a function of percent of features used:")
PlotPerfPercentFeatures(X,y,est=LinearSVC())
#varFilt = VarianceThreshold(threshold=0.05)
#X = varFilt.fit_transform(X)
#print(X.shape,"X post low variance feature filtering")
'EG - graph best features; feature selection using RF, ensemble classifiers..'
'http://nbviewer.ipython.org/github/herrfz/dataanalysis/blob/master/assignment2/samsung_data_prediction_submitted.ipynb'
RFE_FeatsToKeep = 15
FeatSelection_RFE=True
FeatSelection_RFECV=False
if (FeatSelection_RFE or FeatSelection_RFECV) == True:
'RFE + - best feats'
'http://scikit-learn.org/stable/auto_examples/plot_rfe_with_cross_validation.html '
svc = LinearSVC(class_weight='auto')#,penalty='l1',dual=False)
# svc = LogisticRegression(class_weight='auto')#,C=1)
if FeatSelection_RFECV==True:
rfecv = RFECV(estimator=svc, step=0.1,
cv=StratifiedShuffleSplit(y,n_iter=7,test_size=0.33),
scoring='f1',verbose=0)
# " scoring='roc_auc','recall','f1'..."
else:
rfecv = RFE(estimator=svc,n_features_to_select=RFE_FeatsToKeep, step=0.1)
rfecv.fit(X, y)
if FeatSelection_RFECV==True:
print("RFEcv selected %d number of Optimal features : " % (rfecv.n_features_))
print("RFE (%d Features) scorer : \n" % (rfecv.n_features_),rfecv.score(X, y) )
print("RFE selected feature names:")
        rfe_featnames = featureNames[rfecv.get_support()]
        featureNames = rfe_featnames
print (rfe_featnames)
X_RFE = rfecv.fit_transform(X, y)
print(X_RFE.shape,"X_RFE \n")
'Set GetRFEPerf To true or by user, if perf. of reduced set wanted'
GetRFEPerf=False
print("\n X: \n")
ModelParam_GridSearch(X,y,cv=4)
if GetRFEPerf==True:
print("\n X-RFE: \n")
ModelParam_GridSearch(X_RFE,y,cv=4)
GetPCAPerf=False
if GetPCAPerf==True:
pca = PCA(n_components=0.99,whiten=False)
X_PCA = pca.fit_transform(X)
print(X_PCA.shape,"X + PCA")
print("X_PCA \n")
ModelParam_GridSearch(X_PCA,y,cv=3)
| gpl-3.0 |
yarikoptic/NiPy-OLD | examples/neurospin/multi_subject_parcelation.py | 1 | 1704 | """
This script contains a quick demo of a multi-subject parcellation
on a small 2D example
"""
import numpy as np
import nipy.neurospin.spatial_models.hierarchical_parcellation as hp
import nipy.neurospin.utils.simul_2d_multisubject_fmri_dataset as simul
import nipy.neurospin.spatial_models.parcellation as fp
# step 1: generate some synthetic data
nsubj = 10
dimx = 60
dimy = 60
pos = 3*np.array([[ 6, 7],
[10, 10],
[15, 10]])
ampli = np.array([5, 7, 6])
sjitter = 6.0
dataset = simul.make_surrogate_array(nbsubj=nsubj, dimx=dimx, dimy=dimy,
pos=pos, ampli=ampli, width=10.0)
# dataset represents 2D activation images from nsubj subjects,
# with shape (dimx,dimy)
# step 2 : prepare all the information for the parcellation
nbparcel = 10
ref_dim = (dimx,dimy)
xy = np.array(np.where(dataset[0])).T
nvox = np.size(xy,0)
xyz = np.hstack((xy,np.zeros((nvox,1))))
ldata = np.reshape(dataset,(nsubj,dimx*dimy,1))
anat_coord = xy
mask = np.ones((nvox,nsubj)).astype('bool')
Pa = fp.Parcellation(nbparcel,xyz,mask-1)
# step 3 : run the algorithm
Pa = hp.hparcel(Pa, ldata, anat_coord, mu = 3.0)
# note: play with mu to change the 'stiffness of the parcellation'
# step 4: look at the results
Label = np.array([np.reshape(Pa.label[:,s],(dimx,dimy))
for s in range(nsubj)])
import matplotlib.pylab as mp
mp.figure()
for s in range(nsubj):
mp.subplot(2, 5, s+1)
mp.imshow(dataset[s], interpolation='nearest')
mp.axis('off')
mp.figure()
for s in range(nsubj):
mp.subplot(2, 5, s+1)
mp.imshow(Label[s], interpolation='nearest', vmin=-1, vmax=nbparcel)
mp.axis('off')
mp.show()
| bsd-3-clause |
rs2/pandas | pandas/tests/generic/test_frame.py | 2 | 7703 | from copy import deepcopy
from operator import methodcaller
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, date_range
import pandas._testing as tm
from .test_generic import Generic
class TestDataFrame(Generic):
_typ = DataFrame
_comparator = lambda self, x, y: tm.assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame(
[11, 21, 31],
index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]),
)
df.rename(str.lower)
@pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"])
def test_set_axis_name(self, func):
df = pd.DataFrame([[1, 2], [3, 4]])
result = methodcaller(func, "foo")(df)
assert df.index.name is None
assert result.index.name == "foo"
result = methodcaller(func, "cols", axis=1)(df)
assert df.columns.name is None
assert result.columns.name == "cols"
@pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"])
def test_set_axis_name_mi(self, func):
df = DataFrame(
np.empty((3, 3)),
index=MultiIndex.from_tuples([("A", x) for x in list("aBc")]),
columns=MultiIndex.from_tuples([("C", x) for x in list("xyz")]),
)
level_names = ["L1", "L2"]
result = methodcaller(func, level_names)(df)
assert result.index.names == level_names
assert result.columns.names == [None, None]
result = methodcaller(func, level_names, axis=1)(df)
assert result.columns.names == ["L1", "L2"]
assert result.index.names == [None, None]
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
assert df.bool()
df = DataFrame([[False]])
assert not df.bool()
df = DataFrame([[False, False]])
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df.bool()
with pytest.raises(ValueError, match=msg):
bool(df)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({"A": [1, "2", 3.0]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_metadata_propagation_indiv(self):
# groupby
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
"C": np.random.randn(8),
"D": np.random.randn(8),
}
)
result = df.groupby("A").sum()
self.check_metadata(df, result)
# resample
df = DataFrame(
np.random.randn(1000, 2),
index=date_range("20130101", periods=1000, freq="s"),
)
result = df.resample("1T")
self.check_metadata(df, result)
# merging with override
# GH 6923
_metadata = DataFrame._metadata
_finalize = DataFrame.__finalize__
np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["a", "b"])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["c", "d"])
DataFrame._metadata = ["filename"]
df1.filename = "fname1.csv"
df2.filename = "fname2.csv"
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == "merge":
left, right = other.left, other.right
value = getattr(left, name, "") + "|" + getattr(right, name, "")
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, ""))
return self
DataFrame.__finalize__ = finalize
result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner")
assert result.filename == "fname1.csv|fname2.csv"
# concat
# GH 6927
DataFrame._metadata = ["filename"]
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list("ab"))
df1.filename = "foo"
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == "concat":
value = "+".join(
[getattr(o, name) for o in other.objs if getattr(o, name, None)]
)
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
DataFrame.__finalize__ = finalize
result = pd.concat([df1, df1])
assert result.filename == "foo+foo"
# reset
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize # FIXME: use monkeypatch
def test_set_attribute(self):
# Test for consistent setattr behavior when an attribute and a column
# have the same name (Issue #8994)
df = DataFrame({"x": [1, 2, 3]})
df.y = 2
df["y"] = [2, 4, 6]
df.y = 5
assert df.y == 5
tm.assert_series_equal(df["y"], Series([2, 4, 6], name="y"))
def test_deepcopy_empty(self):
# This test covers empty frame copying with non-empty column sets
# as reported in issue GH15370
empty_frame = DataFrame(data=[], index=[], columns=["A"])
empty_frame_copy = deepcopy(empty_frame)
self._compare(empty_frame_copy, empty_frame)
# formerly in Generic but only test DataFrame
class TestDataFrame2:
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_args(self, value):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
msg = 'For argument "inplace" expected type bool, received type'
with pytest.raises(ValueError, match=msg):
super(DataFrame, df).rename_axis(
mapper={"a": "x", "b": "y"}, axis=1, inplace=value
)
with pytest.raises(ValueError, match=msg):
super(DataFrame, df).drop("a", axis=1, inplace=value)
with pytest.raises(ValueError, match=msg):
super(DataFrame, df)._consolidate(inplace=value)
with pytest.raises(ValueError, match=msg):
super(DataFrame, df).fillna(value=0, inplace=value)
with pytest.raises(ValueError, match=msg):
super(DataFrame, df).replace(to_replace=1, value=7, inplace=value)
with pytest.raises(ValueError, match=msg):
super(DataFrame, df).interpolate(inplace=value)
with pytest.raises(ValueError, match=msg):
super(DataFrame, df)._where(cond=df.a > 2, inplace=value)
with pytest.raises(ValueError, match=msg):
super(DataFrame, df).mask(cond=df.a > 2, inplace=value)
def test_unexpected_keyword(self):
# GH8597
df = DataFrame(np.random.randn(5, 2), columns=["jim", "joe"])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df["joe"].copy()
ts[2] = np.nan
msg = "unexpected keyword"
with pytest.raises(TypeError, match=msg):
df.drop("joe", axis=1, in_place=True)
with pytest.raises(TypeError, match=msg):
df.reindex([1, 0], inplace=True)
with pytest.raises(TypeError, match=msg):
ca.fillna(0, inplace=True)
with pytest.raises(TypeError, match=msg):
ts.fillna(0, in_place=True)
| bsd-3-clause |
ujfjhz/vnpy | docker/dockerTrader/ctaStrategy/ctaBacktesting.py | 5 | 40088 | # encoding: UTF-8
'''
This file contains the backtesting engine of the CTA module. The backtesting engine
exposes the same API as the CTA engine, so the same strategy code can be used for
both backtesting and live trading.
'''
from __future__ import division
from datetime import datetime, timedelta
from collections import OrderedDict
from itertools import product
import multiprocessing
import pymongo
from ctaBase import *
from vtConstant import *
from vtGateway import VtOrderData, VtTradeData
from vtFunction import loadMongoSetting
########################################################################
class BacktestingEngine(object):
"""
    CTA backtesting engine.
    Its interface is kept identical to the strategy (CTA) engine,
    so the same code can go from backtest to live trading unchanged.
"""
TICK_MODE = 'tick'
BAR_MODE = 'bar'
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        # Counter for local stop order numbering
        self.stopOrderCount = 0
        # stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
        # Local stop order dictionaries
        # key is stopOrderID, value is the stopOrder object
        self.stopOrderDict = {}             # cancelled stop orders are NOT removed from this dict
        self.workingStopOrderDict = {}      # cancelled stop orders ARE removed from this dict
        # Engine type: backtesting
        self.engineType = ENGINETYPE_BACKTESTING
        # Backtesting related settings
        self.strategy = None        # strategy being backtested
        self.mode = self.BAR_MODE   # backtesting mode, bar (K-line) data by default
        self.startDate = ''
        self.initDays = 0
        self.endDate = ''
        self.slippage = 0           # slippage assumed in the backtest
        self.rate = 0               # commission rate assumed in the backtest (percentage-based commission)
        self.size = 1               # contract size, 1 by default
        self.priceTick = 0          # minimum price increment (tick size)
        self.dbClient = None        # database client
        self.dbCursor = None        # database cursor
        #self.historyData = []       # list of historical data for backtesting
        self.initData = []          # data used for initialization
        #self.backtestingData = []   # data used for backtesting
        self.dbName = ''            # name of the backtesting database
        self.symbol = ''            # name of the backtesting collection
        self.dataStartDate = None       # start date of backtesting data, datetime object
        self.dataEndDate = None         # end date of backtesting data, datetime object
        self.strategyStartDate = None   # strategy start date (earlier data is used for initialization), datetime object
        self.limitOrderDict = OrderedDict()         # limit order dictionary
        self.workingLimitOrderDict = OrderedDict()  # working limit orders, used for order matching
        self.limitOrderCount = 0                    # limit order ID counter
        self.tradeCount = 0             # trade ID counter
        self.tradeDict = OrderedDict()  # trade dictionary
        self.logList = []               # log records
        # Latest market data, used for simulated order matching
        self.tick = None
        self.bar = None
        self.dt = None      # latest timestamp
#----------------------------------------------------------------------
def setStartDate(self, startDate='20100416', initDays=10):
"""设置回测的启动日期"""
self.startDate = startDate
self.initDays = initDays
self.dataStartDate = datetime.strptime(startDate, '%Y%m%d')
initTimeDelta = timedelta(initDays)
self.strategyStartDate = self.dataStartDate + initTimeDelta
#----------------------------------------------------------------------
def setEndDate(self, endDate=''):
"""设置回测的结束日期"""
self.endDate = endDate
if endDate:
self.dataEndDate= datetime.strptime(endDate, '%Y%m%d')
# 若不修改时间则会导致不包含dataEndDate当天数据
self.dataEndDate = self.dataEndDate.replace(hour=23, minute=59)
#----------------------------------------------------------------------
def setBacktestingMode(self, mode):
"""设置回测模式"""
self.mode = mode
#----------------------------------------------------------------------
def setDatabase(self, dbName, symbol):
"""设置历史数据所用的数据库"""
self.dbName = dbName
self.symbol = symbol
#----------------------------------------------------------------------
def loadHistoryData(self):
"""载入历史数据"""
host, port, logging = loadMongoSetting()
self.dbClient = pymongo.MongoClient(host, port)
collection = self.dbClient[self.dbName][self.symbol]
self.output(u'开始载入数据')
# 首先根据回测模式,确认要使用的数据类
if self.mode == self.BAR_MODE:
dataClass = CtaBarData
func = self.newBar
else:
dataClass = CtaTickData
func = self.newTick
# 载入初始化需要用的数据
flt = {'datetime':{'$gte':self.dataStartDate,
'$lt':self.strategyStartDate}}
initCursor = collection.find(flt)
# 将数据从查询指针中读取出,并生成列表
self.initData = [] # 清空initData列表
for d in initCursor:
data = dataClass()
data.__dict__ = d
self.initData.append(data)
# 载入回测数据
if not self.dataEndDate:
flt = {'datetime':{'$gte':self.strategyStartDate}} # 数据过滤条件
else:
flt = {'datetime':{'$gte':self.strategyStartDate,
'$lte':self.dataEndDate}}
self.dbCursor = collection.find(flt)
self.output(u'载入完成,数据量:%s' %(initCursor.count() + self.dbCursor.count()))
#----------------------------------------------------------------------
def runBacktesting(self):
"""运行回测"""
# 载入历史数据
self.loadHistoryData()
# 首先根据回测模式,确认要使用的数据类
if self.mode == self.BAR_MODE:
dataClass = CtaBarData
func = self.newBar
else:
dataClass = CtaTickData
func = self.newTick
self.output(u'开始回测')
self.strategy.inited = True
self.strategy.onInit()
self.output(u'策略初始化完成')
self.strategy.trading = True
self.strategy.onStart()
self.output(u'策略启动完成')
self.output(u'开始回放数据')
for d in self.dbCursor:
data = dataClass()
data.__dict__ = d
func(data)
self.output(u'数据回放结束')
#----------------------------------------------------------------------
def newBar(self, bar):
"""新的K线"""
self.bar = bar
self.dt = bar.datetime
self.crossLimitOrder() # 先撮合限价单
self.crossStopOrder() # 再撮合停止单
self.strategy.onBar(bar) # 推送K线到策略中
#----------------------------------------------------------------------
def newTick(self, tick):
"""新的Tick"""
self.tick = tick
self.dt = tick.datetime
self.crossLimitOrder()
self.crossStopOrder()
self.strategy.onTick(tick)
#----------------------------------------------------------------------
def initStrategy(self, strategyClass, setting=None):
"""
初始化策略
setting是策略的参数设置,如果使用类中写好的默认设置则可以不传该参数
"""
self.strategy = strategyClass(self, setting)
self.strategy.name = self.strategy.className
#----------------------------------------------------------------------
def sendOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发单"""
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
order = VtOrderData()
order.vtSymbol = vtSymbol
order.price = self.roundToPriceTick(price)
order.totalVolume = volume
order.status = STATUS_NOTTRADED # 刚提交尚未成交
order.orderID = orderID
order.vtOrderID = orderID
order.orderTime = str(self.dt)
# CTA委托类型映射
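# Added note: the four CTA order types map onto (direction, offset) pairs as
# buy = open long, sell = close long, short = open short, cover = close short;
# the if/elif chain below encodes exactly this mapping.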
if orderType == CTAORDER_BUY:
order.direction = DIRECTION_LONG
order.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
order.direction = DIRECTION_SHORT
order.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
order.direction = DIRECTION_SHORT
order.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
order.direction = DIRECTION_LONG
order.offset = OFFSET_CLOSE
# 保存到限价单字典中
self.workingLimitOrderDict[orderID] = order
self.limitOrderDict[orderID] = order
return orderID
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
"""撤单"""
if vtOrderID in self.workingLimitOrderDict:
order = self.workingLimitOrderDict[vtOrderID]
order.status = STATUS_CANCELLED
order.cancelTime = str(self.dt)
del self.workingLimitOrderDict[vtOrderID]
#----------------------------------------------------------------------
def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发停止单(本地实现)"""
self.stopOrderCount += 1
stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)
so = StopOrder()
so.vtSymbol = vtSymbol
so.price = self.roundToPriceTick(price)
so.volume = volume
so.strategy = strategy
so.stopOrderID = stopOrderID
so.status = STOPORDER_WAITING
if orderType == CTAORDER_BUY:
so.direction = DIRECTION_LONG
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
so.direction = DIRECTION_LONG
so.offset = OFFSET_CLOSE
# 保存stopOrder对象到字典中
self.stopOrderDict[stopOrderID] = so
self.workingStopOrderDict[stopOrderID] = so
return stopOrderID
#----------------------------------------------------------------------
def cancelStopOrder(self, stopOrderID):
"""撤销停止单"""
# 检查停止单是否存在
if stopOrderID in self.workingStopOrderDict:
so = self.workingStopOrderDict[stopOrderID]
so.status = STOPORDER_CANCELLED
del self.workingStopOrderDict[stopOrderID]
#----------------------------------------------------------------------
def crossLimitOrder(self):
"""基于最新数据撮合限价单"""
# 先确定会撮合成交的价格
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.low # 若买入方向限价单价格高于该价格,则会成交
sellCrossPrice = self.bar.high # 若卖出方向限价单价格低于该价格,则会成交
buyBestCrossPrice = self.bar.open # 在当前时间点前发出的买入委托可能的最优成交价
sellBestCrossPrice = self.bar.open # 在当前时间点前发出的卖出委托可能的最优成交价
else:
buyCrossPrice = self.tick.askPrice1
sellCrossPrice = self.tick.bidPrice1
buyBestCrossPrice = self.tick.askPrice1
sellBestCrossPrice = self.tick.bidPrice1
# 遍历限价单字典中的所有限价单
for orderID, order in self.workingLimitOrderDict.items():
# 判断是否会成交
buyCross = (order.direction==DIRECTION_LONG and
order.price>=buyCrossPrice and
buyCrossPrice > 0) # 国内的tick行情在涨停时askPrice1为0,此时买无法成交
sellCross = (order.direction==DIRECTION_SHORT and
order.price<=sellCrossPrice and
sellCrossPrice > 0) # 国内的tick行情在跌停时bidPrice1为0,此时卖无法成交
# 如果发生了成交
if buyCross or sellCross:
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = order.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
trade.orderID = order.orderID
trade.vtOrderID = order.orderID
trade.direction = order.direction
trade.offset = order.offset
# 以买入为例:
# 1. 假设当根K线的OHLC分别为:100, 125, 90, 110
# 2. 假设在上一根K线结束(也是当前K线开始)的时刻,策略发出的委托为限价105
# 3. 则在实际中的成交价会是100而不是105,因为委托发出时市场的最优价格是100
if buyCross:
trade.price = min(order.price, buyBestCrossPrice)
self.strategy.pos += order.totalVolume
else:
trade.price = max(order.price, sellBestCrossPrice)
self.strategy.pos -= order.totalVolume
trade.volume = order.totalVolume
trade.tradeTime = str(self.dt)
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
# 推送委托数据
order.tradedVolume = order.totalVolume
order.status = STATUS_ALLTRADED
self.strategy.onOrder(order)
# 从字典中删除该限价单
del self.workingLimitOrderDict[orderID]
#----------------------------------------------------------------------
def crossStopOrder(self):
"""基于最新数据撮合停止单"""
# 先确定会撮合成交的价格,这里和限价单规则相反
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.high # 若买入方向停止单价格低于该价格,则会成交
sellCrossPrice = self.bar.low # 若卖出方向限价单价格高于该价格,则会成交
bestCrossPrice = self.bar.open # 最优成交价,买入停止单不能低于,卖出停止单不能高于
else:
buyCrossPrice = self.tick.lastPrice
sellCrossPrice = self.tick.lastPrice
bestCrossPrice = self.tick.lastPrice
# 遍历停止单字典中的所有停止单
for stopOrderID, so in self.workingStopOrderDict.items():
# 判断是否会成交
buyCross = so.direction==DIRECTION_LONG and so.price<=buyCrossPrice
sellCross = so.direction==DIRECTION_SHORT and so.price>=sellCrossPrice
# 如果发生了成交
if buyCross or sellCross:
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = so.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
if buyCross:
self.strategy.pos += so.volume
trade.price = max(bestCrossPrice, so.price)
else:
self.strategy.pos -= so.volume
trade.price = min(bestCrossPrice, so.price)
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
trade.orderID = orderID
trade.vtOrderID = orderID
trade.direction = so.direction
trade.offset = so.offset
trade.volume = so.volume
trade.tradeTime = str(self.dt)
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
# 推送委托数据
so.status = STOPORDER_TRIGGERED
order = VtOrderData()
order.vtSymbol = so.vtSymbol
order.symbol = so.vtSymbol
order.orderID = orderID
order.vtOrderID = orderID
order.direction = so.direction
order.offset = so.offset
order.price = so.price
order.totalVolume = so.volume
order.tradedVolume = so.volume
order.status = STATUS_ALLTRADED
order.orderTime = trade.tradeTime
self.strategy.onOrder(order)
self.limitOrderDict[orderID] = order
# 从字典中删除该停止单
if stopOrderID in self.workingStopOrderDict:
del self.workingStopOrderDict[stopOrderID]
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""考虑到回测中不允许向数据库插入数据,防止实盘交易中的一些代码出错"""
pass
#----------------------------------------------------------------------
def loadBar(self, dbName, collectionName, startDate):
"""直接返回初始化数据列表中的Bar"""
return self.initData
#----------------------------------------------------------------------
def loadTick(self, dbName, collectionName, startDate):
"""直接返回初始化数据列表中的Tick"""
return self.initData
#----------------------------------------------------------------------
def writeCtaLog(self, content):
"""记录日志"""
log = str(self.dt) + ' ' + content
self.logList.append(log)
#----------------------------------------------------------------------
def output(self, content):
"""输出内容"""
print(str(datetime.now()) + "\t" + content)
#----------------------------------------------------------------------
def calculateBacktestingResult(self):
"""
计算回测结果
"""
self.output(u'计算回测结果')
# 首先基于回测后的成交记录,计算每笔交易的盈亏
resultList = [] # 交易结果列表
longTrade = [] # 未平仓的多头交易
shortTrade = [] # 未平仓的空头交易
tradeTimeList = [] # 每笔成交时间戳
posList = [0] # 每笔成交后的持仓情况
for trade in self.tradeDict.values():
# 多头交易
if trade.direction == DIRECTION_LONG:
# 如果尚无空头交易
if not shortTrade:
longTrade.append(trade)
# 当前多头交易为平空
else:
while True:
entryTrade = shortTrade[0]
exitTrade = trade
# 清算开平仓交易
closedVolume = min(exitTrade.volume, entryTrade.volume)
result = TradingResult(entryTrade.price, entryTrade.dt,
exitTrade.price, exitTrade.dt,
-closedVolume, self.rate, self.slippage, self.size)
resultList.append(result)
posList.extend([-1,0])
tradeTimeList.extend([result.entryDt, result.exitDt])
# 计算未清算部分
entryTrade.volume -= closedVolume
exitTrade.volume -= closedVolume
# 如果开仓交易已经全部清算,则从列表中移除
if not entryTrade.volume:
shortTrade.pop(0)
# 如果平仓交易已经全部清算,则退出循环
if not exitTrade.volume:
break
# 如果平仓交易未全部清算,
if exitTrade.volume:
# 且开仓交易已经全部清算完,则平仓交易剩余的部分
# 等于新的反向开仓交易,添加到队列中
if not shortTrade:
longTrade.append(exitTrade)
break
# 如果开仓交易还有剩余,则进入下一轮循环
else:
pass
# 空头交易
else:
# 如果尚无多头交易
if not longTrade:
shortTrade.append(trade)
# 当前空头交易为平多
else:
while True:
entryTrade = longTrade[0]
exitTrade = trade
# 清算开平仓交易
closedVolume = min(exitTrade.volume, entryTrade.volume)
result = TradingResult(entryTrade.price, entryTrade.dt,
exitTrade.price, exitTrade.dt,
closedVolume, self.rate, self.slippage, self.size)
resultList.append(result)
posList.extend([1,0])
tradeTimeList.extend([result.entryDt, result.exitDt])
# 计算未清算部分
entryTrade.volume -= closedVolume
exitTrade.volume -= closedVolume
# 如果开仓交易已经全部清算,则从列表中移除
if not entryTrade.volume:
longTrade.pop(0)
# 如果平仓交易已经全部清算,则退出循环
if not exitTrade.volume:
break
# 如果平仓交易未全部清算,
if exitTrade.volume:
# 且开仓交易已经全部清算完,则平仓交易剩余的部分
# 等于新的反向开仓交易,添加到队列中
if not longTrade:
shortTrade.append(exitTrade)
break
# 如果开仓交易还有剩余,则进入下一轮循环
else:
pass
# 检查是否有交易
if not resultList:
self.output(u'无交易结果')
return {}
# 然后基于每笔交易的结果,我们可以计算具体的盈亏曲线和最大回撤等
capital = 0 # 资金
maxCapital = 0 # 资金最高净值
drawdown = 0 # 回撤
totalResult = 0 # 总成交数量
totalTurnover = 0 # 总成交金额(合约面值)
totalCommission = 0 # 总手续费
totalSlippage = 0 # 总滑点
timeList = [] # 时间序列
pnlList = [] # 每笔盈亏序列
capitalList = [] # 盈亏汇总的时间序列
drawdownList = [] # 回撤的时间序列
winningResult = 0 # 盈利次数
losingResult = 0 # 亏损次数
totalWinning = 0 # 总盈利金额
totalLosing = 0 # 总亏损金额
for result in resultList:
capital += result.pnl
maxCapital = max(capital, maxCapital)
drawdown = capital - maxCapital
pnlList.append(result.pnl)
timeList.append(result.exitDt) # 交易的时间戳使用平仓时间
capitalList.append(capital)
drawdownList.append(drawdown)
totalResult += 1
totalTurnover += result.turnover
totalCommission += result.commission
totalSlippage += result.slippage
if result.pnl >= 0:
winningResult += 1
totalWinning += result.pnl
else:
losingResult += 1
totalLosing += result.pnl
# 计算盈亏相关数据
winningRate = winningResult/totalResult*100 # 胜率
averageWinning = 0 # 这里把数据都初始化为0
averageLosing = 0
profitLossRatio = 0
if winningResult:
averageWinning = totalWinning/winningResult # 平均每笔盈利
if losingResult:
averageLosing = totalLosing/losingResult # 平均每笔亏损
if averageLosing:
profitLossRatio = -averageWinning/averageLosing # 盈亏比
# 返回回测结果
d = {}
d['capital'] = capital
d['maxCapital'] = maxCapital
d['drawdown'] = drawdown
d['totalResult'] = totalResult
d['totalTurnover'] = totalTurnover
d['totalCommission'] = totalCommission
d['totalSlippage'] = totalSlippage
d['timeList'] = timeList
d['pnlList'] = pnlList
d['capitalList'] = capitalList
d['drawdownList'] = drawdownList
d['winningRate'] = winningRate
d['averageWinning'] = averageWinning
d['averageLosing'] = averageLosing
d['profitLossRatio'] = profitLossRatio
d['posList'] = posList
d['tradeTimeList'] = tradeTimeList
return d
#----------------------------------------------------------------------
def showBacktestingResult(self):
"""显示回测结果"""
d = self.calculateBacktestingResult()
# 输出
self.output('-' * 30)
self.output(u'第一笔交易:\t%s' % d['timeList'][0])
self.output(u'最后一笔交易:\t%s' % d['timeList'][-1])
self.output(u'总交易次数:\t%s' % formatNumber(d['totalResult']))
self.output(u'总盈亏:\t%s' % formatNumber(d['capital']))
self.output(u'最大回撤: \t%s' % formatNumber(min(d['drawdownList'])))
self.output(u'平均每笔盈利:\t%s' %formatNumber(d['capital']/d['totalResult']))
self.output(u'平均每笔滑点:\t%s' %formatNumber(d['totalSlippage']/d['totalResult']))
self.output(u'平均每笔佣金:\t%s' %formatNumber(d['totalCommission']/d['totalResult']))
self.output(u'胜率\t\t%s%%' %formatNumber(d['winningRate']))
self.output(u'盈利交易平均值\t%s' %formatNumber(d['averageWinning']))
self.output(u'亏损交易平均值\t%s' %formatNumber(d['averageLosing']))
self.output(u'盈亏比:\t%s' %formatNumber(d['profitLossRatio']))
# 绘图
import matplotlib.pyplot as plt
import numpy as np
try:
import seaborn as sns # 如果安装了seaborn则设置为白色风格
sns.set_style('whitegrid')
except ImportError:
pass
pCapital = plt.subplot(4, 1, 1)
pCapital.set_ylabel("capital")
pCapital.plot(d['capitalList'], color='r', lw=0.8)
pDD = plt.subplot(4, 1, 2)
pDD.set_ylabel("DD")
pDD.bar(range(len(d['drawdownList'])), d['drawdownList'], color='g')
pPnl = plt.subplot(4, 1, 3)
pPnl.set_ylabel("pnl")
pPnl.hist(d['pnlList'], bins=50, color='c')
pPos = plt.subplot(4, 1, 4)
pPos.set_ylabel("Position")
if d['posList'][-1] == 0:
del d['posList'][-1]
tradeTimeIndex = [item.strftime("%m/%d %H:%M:%S") for item in d['tradeTimeList']]
xindex = np.arange(0, len(tradeTimeIndex), np.int(len(tradeTimeIndex)/10))
tradeTimeIndex = map(lambda i: tradeTimeIndex[i], xindex)
pPos.plot(d['posList'], color='k', drawstyle='steps-pre')
pPos.set_ylim(-1.2, 1.2)
plt.sca(pPos)
plt.tight_layout()
plt.xticks(xindex, tradeTimeIndex, rotation=30) # 旋转30
plt.show()
#----------------------------------------------------------------------
def putStrategyEvent(self, name):
"""发送策略更新事件,回测中忽略"""
pass
#----------------------------------------------------------------------
def setSlippage(self, slippage):
"""设置滑点点数"""
self.slippage = slippage
#----------------------------------------------------------------------
def setSize(self, size):
"""设置合约大小"""
self.size = size
#----------------------------------------------------------------------
def setRate(self, rate):
"""设置佣金比例"""
self.rate = rate
#----------------------------------------------------------------------
def setPriceTick(self, priceTick):
"""设置价格最小变动"""
self.priceTick = priceTick
#----------------------------------------------------------------------
def runOptimization(self, strategyClass, optimizationSetting):
"""优化参数"""
# 获取优化设置
settingList = optimizationSetting.generateSetting()
targetName = optimizationSetting.optimizeTarget
# 检查参数设置问题
if not settingList or not targetName:
self.output(u'优化设置有问题,请检查')
# 遍历优化
resultList = []
for setting in settingList:
self.clearBacktestingResult()
self.output('-' * 30)
self.output('setting: %s' %str(setting))
self.initStrategy(strategyClass, setting)
self.runBacktesting()
d = self.calculateBacktestingResult()
try:
targetValue = d[targetName]
except KeyError:
targetValue = 0
resultList.append(([str(setting)], targetValue))
# 显示结果
resultList.sort(reverse=True, key=lambda result:result[1])
self.output('-' * 30)
self.output(u'优化结果:')
for result in resultList:
self.output(u'%s: %s' %(result[0], result[1]))
return resultList
#----------------------------------------------------------------------
def clearBacktestingResult(self):
"""清空之前回测的结果"""
# 清空限价单相关
self.limitOrderCount = 0
self.limitOrderDict.clear()
self.workingLimitOrderDict.clear()
# 清空停止单相关
self.stopOrderCount = 0
self.stopOrderDict.clear()
self.workingStopOrderDict.clear()
# 清空成交相关
self.tradeCount = 0
self.tradeDict.clear()
#----------------------------------------------------------------------
def runParallelOptimization(self, strategyClass, optimizationSetting):
"""并行优化参数"""
# 获取优化设置
settingList = optimizationSetting.generateSetting()
targetName = optimizationSetting.optimizeTarget
# 检查参数设置问题
if not settingList or not targetName:
self.output(u'优化设置有问题,请检查')
# 多进程优化,启动一个对应CPU核心数量的进程池
pool = multiprocessing.Pool(multiprocessing.cpu_count())
l = []
for setting in settingList:
l.append(pool.apply_async(optimize, (strategyClass, setting,
targetName, self.mode,
self.startDate, self.initDays, self.endDate,
self.slippage, self.rate, self.size,
self.dbName, self.symbol)))
pool.close()
pool.join()
# 显示结果
resultList = [res.get() for res in l]
resultList.sort(reverse=True, key=lambda result:result[1])
self.output('-' * 30)
self.output(u'优化结果:')
for result in resultList:
self.output(u'%s: %s' %(result[0], result[1]))
#----------------------------------------------------------------------
def roundToPriceTick(self, price):
"""取整价格到合约最小价格变动"""
if not self.priceTick:
return price
newPrice = round(price/self.priceTick, 0) * self.priceTick
return newPrice
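# Worked example (illustrative numbers): with priceTick = 0.2, a raw price of
# 3001.37 becomes round(3001.37 / 0.2) * 0.2 = 15007 * 0.2 = 3001.4 (up to
# floating-point rounding).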
########################################################################
class TradingResult(object):
"""每笔交易的结果"""
#----------------------------------------------------------------------
def __init__(self, entryPrice, entryDt, exitPrice,
exitDt, volume, rate, slippage, size):
"""Constructor"""
self.entryPrice = entryPrice # 开仓价格
self.exitPrice = exitPrice # 平仓价格
self.entryDt = entryDt # 开仓时间datetime
self.exitDt = exitDt # 平仓时间
self.volume = volume # 交易数量(+/-代表方向)
self.turnover = (self.entryPrice+self.exitPrice)*size*abs(volume) # 成交金额
self.commission = self.turnover*rate # 手续费成本
self.slippage = slippage*2*size*abs(volume) # 滑点成本
self.pnl = ((self.exitPrice - self.entryPrice) * volume * size
- self.commission - self.slippage) # 净盈亏
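# Worked example with purely illustrative numbers (not from any real backtest):
# entryPrice=3000, exitPrice=3010, volume=1, size=300, rate=0.3/10000, slippage=0.2
# turnover = (3000 + 3010) * 300 * 1 = 1803000
# commission = 1803000 * 0.00003 = 54.09
# slippage cost = 0.2 * 2 * 300 * 1 = 120
# pnl = (3010 - 3000) * 1 * 300 - 54.09 - 120 = 2825.91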
########################################################################
class OptimizationSetting(object):
"""优化设置"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.paramDict = OrderedDict()
self.optimizeTarget = '' # 优化目标字段
#----------------------------------------------------------------------
def addParameter(self, name, start, end=None, step=None):
"""增加优化参数"""
if end is None and step is None:
self.paramDict[name] = [start]
return
if end < start:
print(u'参数起始点必须不大于终止点')
return
if step <= 0:
print(u'参数步进必须大于0')
return
l = []
param = start
while param <= end:
l.append(param)
param += step
self.paramDict[name] = l
#----------------------------------------------------------------------
def generateSetting(self):
"""生成优化参数组合"""
# 参数名的列表
nameList = self.paramDict.keys()
paramList = self.paramDict.values()
# 使用迭代工具生产参数对组合
productList = list(product(*paramList))
# 把参数对组合打包到一个个字典组成的列表中
settingList = []
for p in productList:
d = dict(zip(nameList, p))
settingList.append(d)
return settingList
#----------------------------------------------------------------------
def setOptimizeTarget(self, target):
"""设置优化目标字段"""
self.optimizeTarget = target
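# Usage sketch (parameter names below are hypothetical, chosen only to show the
# mechanics of addParameter/generateSetting):
#
# setting = OptimizationSetting()
# setting.setOptimizeTarget('capital') # a key of calculateBacktestingResult()
# setting.addParameter('fastWindow', 5, 10, 5) # -> [5, 10]
# setting.addParameter('slowWindow', 20, 30, 10) # -> [20, 30]
# setting.generateSetting()
# # -> [{'fastWindow': 5, 'slowWindow': 20}, {'fastWindow': 5, 'slowWindow': 30},
# # {'fastWindow': 10, 'slowWindow': 20}, {'fastWindow': 10, 'slowWindow': 30}]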
#----------------------------------------------------------------------
def formatNumber(n):
"""格式化数字到字符串"""
rn = round(n, 2) # 保留两位小数
return format(rn, ',') # 加上千分符
#----------------------------------------------------------------------
def optimize(strategyClass, setting, targetName,
mode, startDate, initDays, endDate,
slippage, rate, size,
dbName, symbol):
"""多进程优化时跑在每个进程中运行的函数"""
engine = BacktestingEngine()
engine.setBacktestingMode(mode)
engine.setStartDate(startDate, initDays)
engine.setEndDate(endDate)
engine.setSlippage(slippage)
engine.setRate(rate)
engine.setSize(size)
engine.setDatabase(dbName, symbol)
engine.initStrategy(strategyClass, setting)
engine.runBacktesting()
d = engine.calculateBacktestingResult()
try:
targetValue = d[targetName]
except KeyError:
targetValue = 0
return (str(setting), targetValue)
if __name__ == '__main__':
# 以下内容是一段回测脚本的演示,用户可以根据自己的需求修改
# 建议使用ipython notebook或者spyder来做回测
# 同样可以在命令模式下进行回测(一行一行输入运行)
from strategy.strategyEmaDemo import *
# 创建回测引擎
engine = BacktestingEngine()
# 设置引擎的回测模式为K线
engine.setBacktestingMode(engine.BAR_MODE)
# 设置回测用的数据起始日期
engine.setStartDate('20110101')
# 载入历史数据到引擎中
engine.setDatabase(MINUTE_DB_NAME, 'IF0000')
# 设置产品相关参数
engine.setSlippage(0.2) # 股指1跳
engine.setRate(0.3/10000) # 万0.3
engine.setSize(300) # 股指合约大小
# 在引擎中创建策略对象
engine.initStrategy(EmaDemoStrategy, {})
# 开始跑回测
engine.runBacktesting()
# 显示回测结果
# spyder或者ipython notebook中运行时,会弹出盈亏曲线图
# 直接在cmd中回测则只会打印一些回测数值
engine.showBacktestingResult()
| mit |
mcnalu/linuxvoice-imaging | bayer_split.py | 1 | 1184 | import rawpy, matplotlib.pyplot as plt, numpy as np
path='tree.exif.dng'
raw=rawpy.imread(path)
print 'Sizes of the image:',raw.sizes
print 'Bayer pattern:\n',raw.raw_pattern
print 'Indices 0,1,2,3 in above are, in order: ',raw.color_desc
print 'camera_whitebalance',raw.camera_whitebalance
print 'daylight_whitebalance',raw.daylight_whitebalance
##note the two following methods are (y,x) - annoying!
print 'Colour of bayer pixel at 101,100:',raw.raw_color(101,100)
print 'Value of bayer pixel at 101,100:',raw.raw_value(101,100)
nx=raw.raw_image.shape[1]
ny=raw.raw_image.shape[0]
ris=raw.raw_image.astype(float)
rismax=ris.max()
#Make an rgb bayer image
rgb=np.zeros((ny,nx,3), 'float')
rgb[1::2,0::2,0]=ris[1::2,0::2]/rismax
rgb[0::2,0::2,1]=ris[0::2,0::2]/rismax
rgb[1::2,1::2,1]=ris[1::2,1::2]/rismax
rgb[0::2,1::2,2]=ris[0::2,1::2]/rismax
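# Note on the slicing above: it implies a 2x2 Bayer tile laid out as G B / R G,
# i.e. red at (odd row, even col), green at (even, even) and (odd, odd), and
# blue at (even row, odd col); each plane is normalised by the global maximum
# so imshow gets values in [0, 1].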
plt.imshow(rgb, interpolation='none')
plt.show()
#Perform a half-resolution demosaic
rgbi=np.zeros((ny/2,nx/2,3), 'float')
rgbi[::,::,0]=ris[1::2,0::2]/rismax
rgbi[::,::,1]=0.5*(ris[0::2,0::2]+ris[1::2,1::2])/rismax
rgbi[::,::,2]=ris[0::2,1::2]/rismax
#print rgbi.max()
#imgplot=plt.imshow(rgbi, interpolation='none')
#plt.show()
| gpl-3.0 |
yujikato/DIRAC | src/DIRAC/Core/Utilities/Graphs/Graph.py | 2 | 12162 | """ Graph is a class providing layouts for the complete plot images including
titles multiple plots and a legend
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import datetime
import time
import os
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
from DIRAC.Core.Utilities.Graphs.GraphUtilities import pixelToPoint, evalPrefs, \
to_timestamp, add_time_to_title
from DIRAC.Core.Utilities.Graphs.GraphData import GraphData
from DIRAC.Core.Utilities.Graphs.Legend import Legend
from DIRAC import gLogger
DEBUG = 0
class Graph(object):
def __init__(self, *args, **kw):
super(Graph, self).__init__(*args, **kw)
def layoutFigure(self, legend):
prefs = self.prefs
# Get the main Figure object
# self.figure = Figure()
figure = self.figure
self.canvas = FigureCanvasAgg(figure)
dpi = prefs['dpi']
width = float(prefs['width'])
height = float(prefs['height'])
width_inch = width / dpi
height_inch = height / dpi
figure.set_size_inches(width_inch, height_inch)
figure.set_dpi(dpi)
figure.set_facecolor(prefs.get('background_color', 'white'))
figure_padding = float(prefs['figure_padding'])
figure_left_padding = float(prefs.get('figure_left_padding', figure_padding))
figure_right_padding = float(prefs.get('figure_right_padding', figure_padding))
figure_top_padding = float(prefs.get('figure_top_padding', figure_padding))
figure_bottom_padding = float(prefs.get('figure_bottom_padding', figure_padding))
text_size = prefs.get('text_size', 8)
text_padding = prefs.get('text_padding', 5)
#######################################
# Make the graph title
title = prefs.get('title', '')
subtitle = ''
title_size = 0
title_padding = 0
if title:
title_size = prefs.get('title_size', 1.5 * text_size)
title_padding = float(prefs.get('title_padding', 1.5 * text_padding))
figure.text(0.5, 1. - (title_size + figure_padding) / height, title,
ha='center', va='bottom', size=pixelToPoint(title_size, dpi))
subtitle = prefs.get('subtitle', '')
if subtitle:
sublines = subtitle.split('\n')
nsublines = len(sublines)
subtitle_size = prefs.get('subtitle_size', 1.2 * text_size)
subtitle_padding = float(prefs.get('subtitle_padding', 1.2 * text_padding))
top_offset = subtitle_size + subtitle_padding + title_size + figure_padding
for subline in sublines:
figure.text(0.5, 1. - (top_offset) / height,
subline, ha='center', va='bottom',
size=pixelToPoint(subtitle_size, dpi), fontstyle='italic')
top_offset += subtitle_size + subtitle_padding
########################################
# Evaluate the plot area dimensions
graph_width = width - figure_left_padding - figure_right_padding
graph_height = height - figure_top_padding - figure_bottom_padding
if title:
graph_height = graph_height - title_padding - title_size
if subtitle:
graph_height = graph_height - nsublines * (subtitle_size + subtitle_padding)
graph_left = figure_left_padding
graph_bottom = figure_bottom_padding
#########################################
# Make the plot time stamp if requested
flag = prefs.get('graph_time_stamp', True)
if flag:
timeString = "Generated on " + \
datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S ') + 'UTC'
time_size = prefs['text_size'] * .8
figure.text(
0.995,
0.005,
timeString,
ha='right',
va='bottom',
size=pixelToPoint(
time_size,
dpi),
fontstyle='italic')
#########################################
# Make the graph Legend if requested
legend_flag = prefs['legend']
legend_ax = None
column_width = legend.column_width
if legend_flag:
legend_position = prefs['legend_position']
# legend_width = float(prefs['legend_width'])
# legend_height = float(prefs['legend_height'])
legend_width, legend_height, legend_max_height = legend.getLegendSize()
legend_padding = float(prefs['legend_padding'])
if legend_position in ['right', 'left']:
# One column in case of vertical legend
legend_width = column_width + legend_padding
legend_height = min(graph_height, legend_max_height)
bottom = (height - title_size - title_padding - legend_height) / 2. / height
if legend_position == 'right':
left = 1. - (figure_padding + legend_width) / width
else:
left = figure_padding / width
graph_left = graph_left + legend_width
graph_width = graph_width - legend_width - legend_padding
elif legend_position == 'bottom':
bottom = figure_padding / height
left = (width - legend_width) / 2. / width
graph_height = graph_height - legend_height - legend_padding
graph_bottom = graph_bottom + legend_height + legend_padding
legend_rect = (left, bottom, legend_width / width, legend_height / height)
legend_ax = figure.add_axes(legend_rect)
###########################################
# Make the plot spots
plot_grid = prefs['plot_grid']
nx = int(plot_grid.split(':')[0])
ny = int(plot_grid.split(':')[1])
plot_axes = []
for j in range(ny - 1, -1, -1):
for i in range(nx):
plot_rect = ((graph_left + graph_width * i / nx) / width,
(graph_bottom + graph_height * j / ny) / height,
graph_width / nx / width,
graph_height / ny / height)
plot_axes.append(figure.add_axes(plot_rect))
return legend_ax, plot_axes
def makeTextGraph(self, text='Empty image'):
""" Make an empty text image
"""
self.figure = Figure()
figure = self.figure
self.canvas = FigureCanvasAgg(figure)
prefs = self.prefs
dpi = prefs['dpi']
width = float(prefs['width'])
height = float(prefs['height'])
width_inch = width / dpi
height_inch = height / dpi
figure.set_size_inches(width_inch, height_inch)
figure.set_dpi(dpi)
figure.set_facecolor('white')
text_size = prefs.get('text_size', 12)
figure.text(.5, .5, text, horizontalalignment='center',
size=pixelToPoint(text_size, dpi))
def makeGraph(self, data, *args, **kw):
start = time.time()
# Evaluate all the preferences
self.prefs = evalPrefs(*args, **kw)
prefs = self.prefs
if DEBUG:
print("makeGraph time 1", time.time() - start)
start = time.time()
if 'text_image' in prefs:
self.makeTextGraph(str(prefs['text_image']))
return
# Evaluate the number of plots and their requested layout
metadata = prefs.get('metadata', {})
plot_grid = prefs.get('plot_grid', '1:1')
nx = int(plot_grid.split(':')[0])
ny = int(plot_grid.split(':')[1])
nPlots = nx * ny
if nPlots == 1:
if not isinstance(data, list):
data = [data]
if not isinstance(metadata, list):
metadata = [metadata]
else:
if not isinstance(data, list):
# return S_ERROR('Single data for multiplot graph')
print('Single data for multiplot graph')
return
if not isinstance(metadata, list):
metaList = []
for _ in range(nPlots):
metaList.append(metadata)
metadata = metaList
# Initialize plot data
graphData = []
plot_prefs = []
for i in range(nPlots):
plot_prefs.append(evalPrefs(prefs, metadata[i]))
gdata = GraphData(data[i])
if i == 0:
plot_type = plot_prefs[i]['plot_type']
if 'sort_labels' in plot_prefs[i]:
reverse = plot_prefs[i].get('reverse_labels', False)
gdata.sortLabels(plot_prefs[i]['sort_labels'], reverse_order=reverse)
if 'limit_labels' in plot_prefs[i]:
if plot_prefs[i]['limit_labels'] > 0:
gdata.truncateLabels(plot_prefs[i]['limit_labels'])
if 'cumulate_data' in plot_prefs[i]:
gdata.makeCumulativeGraph()
plot_title = plot_prefs[i].get('plot_title', '')
if plot_title != "NoTitle":
begin = ''
end = ''
if 'starttime' in plot_prefs[i] and 'endtime' in plot_prefs[i]:
begin = to_timestamp(plot_prefs[i]['starttime'])
end = to_timestamp(plot_prefs[i]['endtime'])
elif gdata.key_type == "time":
begin = gdata.min_key
end = gdata.max_key
if begin and end:
time_title = add_time_to_title(begin, end)
if plot_title:
plot_title += ":"
plot_prefs[i]['plot_title'] = plot_title + ' ' + time_title
graphData.append(gdata)
# Do not make legend for the plot with non-string keys (except for PieGraphs)
if not graphData[0].subplots and graphData[0].key_type != 'string' and not plot_type == 'PieGraph':
prefs['legend'] = False
if prefs['legend'] and graphData[0].key_type != 'string' and plot_type == 'PieGraph':
graphData[0].initialize(key_type='string')
legend = Legend(graphData[0], None, prefs)
self.figure = Figure()
# Make Water Mark
image = prefs.get('watermark', None)
self.drawWaterMark(image)
legend_ax, plot_axes = self.layoutFigure(legend)
if DEBUG:
print("makeGraph time layout", time.time() - start)
start = time.time()
# Make plots
for i in range(nPlots):
plot_type = plot_prefs[i]['plot_type']
try:
# TODO: Remove when we moved to python3
exec("import %s" % plot_type)
except ImportError:
print("Trying to use python like import")
try:
exec("from . import %s" % plot_type)
except ImportError as x:
print("Failed to import graph type %s: %s" % (plot_type, str(x)))
return None
ax = plot_axes[i]
plot = eval("%s.%s(graphData[i],ax,plot_prefs[i])" % (plot_type, plot_type))
plot.draw()
if DEBUG:
print("makeGraph time plots", time.time() - start)
start = time.time()
# Make legend
if legend_ax:
legend.setAxes(legend_ax)
legend.draw()
if DEBUG:
print("makeGraph time legend", time.time() - start)
start = time.time()
# return S_OK()
def drawWaterMark(self, imagePath=None):
""" Make the figure water mark
"""
prefs = self.prefs
try:
from PIL import Image, ImageEnhance
except ImportError:
return
if not imagePath:
if 'watermark' in prefs:
imagePath = os.path.expandvars(os.path.expanduser(prefs['watermark']))
if not imagePath:
return
try:
image = Image.open(imagePath)
enh = ImageEnhance.Contrast(image)
i = enh.enhance(.1)
img_size = i.size
resize = 1.0
if prefs['width'] < img_size[0]:
resize = prefs['width'] / float(img_size[0])
if prefs['height'] < img_size[1]:
resize = min(resize, prefs['height'] / float(img_size[1]))
box = (0.5 - img_size[0] / float(prefs['width']) * resize / 2.,
0.5 - img_size[1] / float(prefs['height']) * resize / 2.,
img_size[0] / float(prefs['width']) * resize,
img_size[1] / float(prefs['height']) * resize)
# print box
ax_wm = self.figure.add_axes(box)
ax_wm.imshow(i, origin='lower', aspect='equal', zorder=-10)
ax_wm.axis('off')
ax_wm.set_frame_on(False)
ax_wm.set_clip_on(False)
except Exception as e:
print(e)
def writeGraph(self, fname, fileFormat='PNG'):
""" Write out the resulting graph to a file with fname in a given format
"""
self.canvas.draw()
if fileFormat.lower() == 'png':
self.canvas.print_png(fname)
else:
gLogger.error("File format '%s' is not supported!" % fileFormat)
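# Rough usage sketch (illustrative only: the exact preference keys that must be
# supplied depend on the defaults merged in by evalPrefs, and the values below
# are assumptions rather than DIRAC defaults):
#
# graph = Graph()
# graph.makeGraph({'Site A': 10.0, 'Site B': 20.0}, # label -> value mapping
# plot_type='PieGraph', title='Jobs per site',
# width=800, height=600, dpi=100)
# graph.writeGraph('jobs.png', 'PNG')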
| gpl-3.0 |
Djabbz/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
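# The OVA decision boundary satisfies
# coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0;
# solving for x1 gives the line plotted below.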
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
deprofundis/deprofundis | models/scripts/example_gaussian.py | 1 | 1682 | from models.sampler import BlockGibbsSampler
from models.distribution import GaussianBinary
from models.optimizer import SGD
from models.rbm import RBM
from data.mnist.path import *
from utils.utils import prepare_batches
from matplotlib import pyplot
import sklearn.preprocessing as pre
import pandas, numpy, time
SIZE_BATCH = 10
EPOCHS = 10
SIZE_HIDDEN = 500
SIZE_VISIBLE = 784
# load binary mnist sample dataset
dataset = pandas.read_csv(MNIST_TRAIN, delimiter=',', dtype=numpy.float64, header=None)
# leave the first column out since it contains the labels
# dataset must be normalized to have unit variance by column (sigma_i == 1)
dataset = pre.scale(dataset.values[:,1:], axis=0)
# compute batch set
idx = prepare_batches(len(dataset), SIZE_BATCH)
# load distribution
gaussian = GaussianBinary(SIZE_VISIBLE, SIZE_HIDDEN)
gibbs = BlockGibbsSampler(gaussian, sampling_steps=1)
sgd = SGD(gaussian, learning_rate=0.001, weight_decay=0, momentum=0)
rbm = RBM(gaussian, gibbs, sgd)
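# Note: sampling_steps=1 means a single block-Gibbs sweep per parameter update,
# i.e. CD-1-style training (an assumption based on the sampler's name and
# argument; the exact semantics live in models.sampler).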
# pyplot.figure(1)
# pyplot.ion()
# pyplot.show()
# vmin = numpy.min(dataset)
# vmax = numpy.max(dataset)
for epoch in range(EPOCHS):
for b_idx in idx:
batch = dataset[b_idx[0]:b_idx[1], :]
d_weight_update, _, _ = rbm.train_batch(batch)
rec_probs, rec_state = rbm.reconstruct(batch,steps=10)
pyplot.clf()
img = numpy.reshape(rec_state[-1,:], newshape=(28,28))
print "Max: " + str(numpy.max(img)) + " Min: " + str(numpy.min(img))
# pyplot.hist(d_weight_update)
# pyplot.draw()
# pyplot.matshow(img, fignum=0, cmap=pyplot.cm.gray, vmin=vmin , vmax=vmax)
# pyplot.draw()
# time.sleep(0.1)
raw_input()
| mit |
btabibian/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 47 | 3599 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Neighboring points often share the same leaf of a tree and therefore share
large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of
the transformed data with truncated SVD.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forest learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
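# X_transformed is a sparse binary indicator matrix with one column per leaf
# across all trees; each sample activates exactly one leaf per tree, so every
# row contains n_estimators ones.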
# Visualize result after dimensionality reduction using truncated SVD
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("Truncated SVD reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
zaxtax/scikit-learn | examples/mixture/plot_gmm_selection.py | 36 | 3271 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
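# bic was filled with the covariance type as the outer loop and the number of
# components as the inner loop, so each consecutive block of
# len(n_components_range) entries belongs to one covariance type (this is the
# slicing used in the plotting loop below).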
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
# http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
wazeerzulfikar/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 1 | 21273 | import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics import cluster as cluster_module
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import accuracy_score
from sklearn.metrics.scorer import _check_multimetric_scoring
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
'neg_mean_squared_error', 'neg_mean_squared_log_error',
'neg_median_absolute_error', 'mean_absolute_error',
'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'neg_log_loss', 'log_loss']
# All supervised cluster scorers (They behave like classification metric)
CLUSTER_SCORERS = ["adjusted_rand_score",
"homogeneity_score",
"completeness_score",
"v_measure_score",
"mutual_info_score",
"adjusted_mutual_info_score",
"normalized_mutual_info_score",
"fowlkes_mallows_score"]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_clf) for name in CLUSTER_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
"""Dummy estimator to test scoring validators"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test scoring validators"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test scoring validators"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test scoring validators"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def check_scoring_validator_for_single_metric_usecases(scoring_validator):
# Test all branches of single metric usecases
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, scoring_validator, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = scoring_validator(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, scoring_validator, estimator)
scorer = scoring_validator(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = scoring_validator(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
# Test the allow_none parameter for check_scoring alone
if scoring_validator is check_scoring:
estimator = EstimatorWithFit()
scorer = scoring_validator(estimator, allow_none=True)
assert_true(scorer is None)
def check_multimetric_scoring_single_metric_wrapper(*args, **kwargs):
# This wraps the _check_multimetric_scoring to take in single metric
# scoring parameter so we can run the tests that we will run for
# check_scoring, for check_multimetric_scoring too for single-metric
# usecases
scorers, is_multi = _check_multimetric_scoring(*args, **kwargs)
# For all single metric use cases, it should register as not multimetric
assert_false(is_multi)
if args[0] is not None:
assert_true(scorers is not None)
names, scorers = zip(*scorers.items())
assert_equal(len(scorers), 1)
assert_equal(names[0], 'score')
scorers = scorers[0]
return scorers
def test_check_scoring_and_check_multimetric_scoring():
check_scoring_validator_for_single_metric_usecases(check_scoring)
# To make sure the check_scoring is correctly applied to the constituent
# scorers
check_scoring_validator_for_single_metric_usecases(
check_multimetric_scoring_single_metric_wrapper)
# For multiple metric use cases
# Make sure it works for the valid cases
for scoring in (('accuracy',), ['precision'],
{'acc': 'accuracy', 'precision': 'precision'},
('accuracy', 'precision'), ['precision', 'accuracy'],
{'accuracy': make_scorer(accuracy_score),
'precision': make_scorer(precision_score)}):
estimator = LinearSVC(random_state=0)
estimator.fit([[1], [2], [3]], [1, 1, 0])
scorers, is_multi = _check_multimetric_scoring(estimator, scoring)
assert_true(is_multi)
assert_true(isinstance(scorers, dict))
assert_equal(sorted(scorers.keys()), sorted(list(scoring)))
assert_true(all([isinstance(scorer, _PredictScorer)
for scorer in list(scorers.values())]))
if 'acc' in scoring:
assert_almost_equal(scorers['acc'](
estimator, [[1], [2], [3]], [1, 0, 0]), 2. / 3.)
if 'accuracy' in scoring:
assert_almost_equal(scorers['accuracy'](
estimator, [[1], [2], [3]], [1, 0, 0]), 2. / 3.)
if 'precision' in scoring:
assert_almost_equal(scorers['precision'](
estimator, [[1], [2], [3]], [1, 0, 0]), 0.5)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
# Make sure it raises errors when scoring parameter is not valid.
# More weird corner cases are tested at test_validation.py
error_message_regexp = ".*must be unique strings.*"
for scoring in ((make_scorer(precision_score), # Tuple of callables
make_scorer(accuracy_score)), [5],
(make_scorer(precision_score),), (), ('f1', 'f1')):
assert_raises_regexp(ValueError, error_message_regexp,
_check_multimetric_scoring, estimator,
scoring=scoring)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_supervised_cluster_scorers():
# Test clustering scorers against gold standard labeling.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
for name in CLUSTER_SCORERS:
score1 = get_scorer(name)(km, X_test, y_test)
score2 = getattr(cluster_module, name)(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
@ignore_warnings # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
if scorer_name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm)
else:
score = scorer(estimator, X_mm, y_mm)
assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
# float values.
for name in SCORERS.keys():
yield check_scorer_memmap, name
def test_deprecated_names():
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
for name in ('mean_absolute_error', 'mean_squared_error',
'median_absolute_error', 'log_loss'):
warning_msg = "Scoring method %s was renamed to" % name
for scorer in (get_scorer(name), SCORERS[name]):
assert_warns_message(DeprecationWarning,
warning_msg,
scorer, clf, X, y)
assert_warns_message(DeprecationWarning,
warning_msg,
cross_val_score, clf, X, y, scoring=name)
def test_scoring_is_not_metric():
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), f1_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), roc_auc_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
Ridge(), r2_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
KMeans(), cluster_module.adjusted_rand_score)
| bsd-3-clause |
kchodorow/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py | 18 | 13148 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for k, v in list(feeder.input_dtype.items()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.test_session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for k, v in list(inp.items()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.test_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.test_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
def test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      self._assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
| apache-2.0 |
zorroblue/scikit-learn | examples/covariance/plot_covariance_estimation.py | 38 | 5075 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.model_selection import GridSearchCV
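# Added illustrative sketch (not part of the original example): it spells out
# the shrinkage formula applied by ShrunkCovariance,
# shrunk = (1 - a) * emp_cov + a * mu * I, with mu = trace(emp_cov) / n_features
# and `a` the shrinkage coefficient. The helper name is ours and it is only a
# reading aid, not something used by the plot below.
def manual_shrunk_covariance(X, shrinkage=0.1):
    # maximum-likelihood (biased) empirical covariance of the columns of X
    emp_cov = np.cov(X, rowvar=False, bias=True)
    mu = np.trace(emp_cov) / emp_cov.shape[0]
    return (1. - shrinkage) * emp_cov + shrinkage * mu * np.eye(emp_cov.shape[0])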
# #############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
# #############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
# #############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
# #############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
bnoi/scikit-tracker | sktracker/io/trackmate.py | 1 | 1955 | import xml.etree.cElementTree as et
import pandas as pd
import numpy as np
def trackmate_peak_import(trackmate_xml_path):
"""Import detected peaks with TrackMate Fiji plugin.
Parameters
----------
trackmate_xml_path : str
TrackMate XML file path.
"""
root = et.fromstring(open(trackmate_xml_path).read())
objects = []
object_labels = {'FRAME': 't_stamp',
'POSITION_T': 't',
'POSITION_X': 'x',
'POSITION_Y': 'y',
'POSITION_Z': 'z',
'MEAN_INTENSITY': 'I',
'ESTIMATED_DIAMETER': 'w',
'QUALITY': 'q'}
features = root.find('Model').find('FeatureDeclarations').find('SpotFeatures')
features = [c.get('feature') for c in features.getchildren()]
spots = root.find('Model').find('AllSpots')
trajs = pd.DataFrame([])
objects = []
for frame in spots.findall('SpotsInFrame'):
for spot in frame.findall('Spot'):
single_object = []
for label in features:
single_object.append(spot.get(label))
objects.append(single_object)
trajs = pd.DataFrame(objects, columns=features)
trajs = trajs.astype(np.float)
# Apply filters
spot_filters = root.find("Settings").find("SpotFilterCollection")
for spot_filter in spot_filters.findall('Filter'):
name = spot_filter.get('feature')
value = float(spot_filter.get('value'))
isabove = True if spot_filter.get('isabove') == 'true' else False
if isabove:
trajs = trajs[trajs[name] > value]
else:
trajs = trajs[trajs[name] < value]
trajs = trajs.loc[:, object_labels.keys()]
trajs.columns = [object_labels[k] for k in object_labels.keys()]
trajs['label'] = np.arange(trajs.shape[0])
trajs.set_index(['t_stamp', 'label'], inplace=True)
return trajs
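if __name__ == "__main__":
    # Added usage sketch: "tracks.xml" is a placeholder path, not a file that
    # ships with scikit-tracker; pass a real TrackMate XML export when running
    # this module directly.
    import sys
    xml_path = sys.argv[1] if len(sys.argv) > 1 else "tracks.xml"
    print(trackmate_peak_import(xml_path).head())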
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/mpl_toolkits/axes_grid1/mpl_axes.py | 8 | 4971 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import matplotlib.axes as maxes
from matplotlib.artist import Artist
from matplotlib.axis import XAxis, YAxis
class SimpleChainedObjects(object):
def __init__(self, objects):
self._objects = objects
def __getattr__(self, k):
_a = SimpleChainedObjects([getattr(a, k) for a in self._objects])
return _a
def __call__(self, *kl, **kwargs):
for m in self._objects:
m(*kl, **kwargs)
class Axes(maxes.Axes):
def toggle_axisline(self, b):
warnings.warn("toggle_axisline is not necessary and deprecated in axes_grid1")
class AxisDict(dict):
def __init__(self, axes):
self.axes = axes
super(Axes.AxisDict, self).__init__()
def __getitem__(self, k):
if isinstance(k, tuple):
r = SimpleChainedObjects([dict.__getitem__(self, k1) for k1 in k])
return r
elif isinstance(k, slice):
if k.start == None and k.stop == None and k.step == None:
r = SimpleChainedObjects(list(six.itervalues(self)))
return r
else:
raise ValueError("Unsupported slice")
else:
return dict.__getitem__(self, k)
def __call__(self, *v, **kwargs):
return maxes.Axes.axis(self.axes, *v, **kwargs)
def __init__(self, *kl, **kw):
super(Axes, self).__init__(*kl, **kw)
def _init_axis_artists(self, axes=None):
if axes is None:
axes = self
self._axislines = self.AxisDict(self)
self._axislines["bottom"] = SimpleAxisArtist(self.xaxis, 1, self.spines["bottom"])
self._axislines["top"] = SimpleAxisArtist(self.xaxis, 2, self.spines["top"])
self._axislines["left"] = SimpleAxisArtist(self.yaxis, 1, self.spines["left"])
self._axislines["right"] = SimpleAxisArtist(self.yaxis, 2, self.spines["right"])
def _get_axislines(self):
return self._axislines
axis = property(_get_axislines)
def cla(self):
super(Axes, self).cla()
self._init_axis_artists()
class SimpleAxisArtist(Artist):
def __init__(self, axis, axisnum, spine):
self._axis = axis
self._axisnum = axisnum
self.line = spine
if isinstance(axis, XAxis):
self._axis_direction = ["bottom", "top"][axisnum-1]
elif isinstance(axis, YAxis):
self._axis_direction = ["left", "right"][axisnum-1]
else:
raise ValueError("axis must be instance of XAxis or YAxis : %s is provided" % (axis,))
Artist.__init__(self)
def _get_major_ticks(self):
tickline = "tick%dline" % self._axisnum
return SimpleChainedObjects([getattr(tick, tickline) for tick \
in self._axis.get_major_ticks()])
def _get_major_ticklabels(self):
label = "label%d" % self._axisnum
return SimpleChainedObjects([getattr(tick, label) for tick \
in self._axis.get_major_ticks()])
def _get_label(self):
return self._axis.label
major_ticks = property(_get_major_ticks)
major_ticklabels = property(_get_major_ticklabels)
label = property(_get_label)
def set_visible(self, b):
self.toggle(all=b)
self.line.set_visible(b)
self._axis.set_visible(True)
Artist.set_visible(self, b)
def set_label(self, txt):
self._axis.set_label_text(txt)
def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
if all:
_ticks, _ticklabels, _label = True, True, True
elif all is not None:
_ticks, _ticklabels, _label = False, False, False
else:
_ticks, _ticklabels, _label = None, None, None
if ticks is not None:
_ticks = ticks
if ticklabels is not None:
_ticklabels = ticklabels
if label is not None:
_label = label
tickOn = "tick%dOn" % self._axisnum
labelOn = "label%dOn" % self._axisnum
if _ticks is not None:
tickparam = {tickOn: _ticks}
self._axis.set_tick_params(**tickparam)
if _ticklabels is not None:
tickparam = {labelOn: _ticklabels}
self._axis.set_tick_params(**tickparam)
if _label is not None:
pos = self._axis.get_label_position()
if (pos == self._axis_direction) and not _label:
self._axis.label.set_visible(False)
elif _label:
self._axis.label.set_visible(True)
self._axis.set_label_position(self._axis_direction)
if __name__ == '__main__':
    import matplotlib.pyplot as plt  # `figure` was otherwise undefined here
    fig = plt.figure()
ax = Axes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
ax.cla()
| mit |
arahuja/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
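# Added illustrative helper (not part of the original example): for a single
# sample the silhouette coefficient is s = (b - a) / max(a, b), where `a` is
# the mean distance to the other members of its own cluster and `b` is the
# mean distance to the members of the nearest other cluster. For instance,
# silhouette_of_one_sample(0.5, 2.0) returns 0.75.
def silhouette_of_one_sample(a, b):
    return (b - a) / max(a, b)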
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distict cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
mxjl620/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AveOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; neither does the average
number of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
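# Added sketch (ours, not the scikit-learn API): the brute-force definition of
# a one-way partial dependence curve. For every grid value of the target
# feature, that feature is overwritten for all samples and the predictions are
# averaged, marginalizing over the remaining features. The GBRT-specific
# `partial_dependence` used below relies on a faster tree-recursion method, so
# this helper is only a reading aid.
def brute_force_partial_dependence(model, X, feature_idx, grid_values):
    averaged = []
    for value in grid_values:
        X_mod = np.array(X, dtype=float)  # work on a copy of the data
        X_mod[:, feature_idx] = value
        averaged.append(np.mean(model.predict(X_mod)))
    return np.asarray(averaged)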
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
siutanwong/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
Pragmatismo/Pigrow | scripts/gui/graph_modules/graph_day_night.py | 1 | 7609 |
def read_graph_options():
'''
Returns a dictionary of settings and their default values for use by the remote gui
'''
graph_module_settings_dict = {
"use_time":"lamp", # sun or lamp
"latitude":"51.50",
"longitude":"0.12",
"light_on_time_hour":"7",
"light_on_time_min":"0",
"light_off_time_hour":"22",
"light_off_time_min":"00",
"label_duration":"false",
"title_text":"",
"show_time_period":"true",
"color_cycle":"false",
"line_style":"-",
"marker":"",
"show_grid":"true",
"major_ticks":"",
"minor_ticks":"1",
"ylabel":""
}
return graph_module_settings_dict
def make_graph(list_of_datasets, graph_path, ymax="", ymin="", size_h="", size_v="", dh="", th="", tc="", dc="", extra=[]):
print("Making a day/night graph graph...")
import matplotlib
import datetime
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as plticker
day_color = "yellow"
night_color = "darkblue"
if extra == {}:
extra = read_graph_options()
# set variables to settings from dictionary converting to the appropriate type
use_time = extra['use_time'].lower()
latitude = float(extra['latitude'])
longitude = float(extra['longitude'])
light_on_time_hour = extra['light_on_time_hour']
light_on_time_min = extra['light_on_time_min']
light_off_time_hour = extra['light_off_time_hour']
light_off_time_min = extra['light_off_time_min']
label_duration = extra['label_duration']
title_text = extra['title_text']
color_cycle = extra['color_cycle'].lower()
if ',' in color_cycle:
color_cycle.split(",")
line_style = extra['line_style']
marker = extra['marker']
line_flags = marker + line_style
show_grid = extra['show_grid'].lower()
major_ticks = extra['major_ticks']
minor_ticks = extra['minor_ticks']
show_time_period = extra["show_time_period"]
ylabel = extra['ylabel']
#import the tools we'll be using
if use_time == "sun":
from suntime import Sun # to install run the command pip3 install suntime
sun = Sun(latitude, longitude)
def make_dict_of_sets(date_list, value_list, key_list):
        # make a dictionary containing every day's list of dates and values
dictionary_of_sets = {}
light_markers = []
durations = []
for log_item_pos in range(0, len(date_list)):
day_group = date_list[log_item_pos].strftime("%Y:%m:%d")
log_time = date_list[log_item_pos]
#log_time = log_time.replace(year=1980, month=1, day=1)
if day_group in dictionary_of_sets:
# Read existing lists of dates and values
values_to_graph = dictionary_of_sets[day_group][0]
dates_to_graph = dictionary_of_sets[day_group][1]
# add current value and date to lists
values_to_graph.append(value_list[log_item_pos])
dates_to_graph.append(log_time)
else:
                # create new date and value lists if the day_group doesn't exist yet
values_to_graph = [value_list[log_item_pos]]
dates_to_graph = [log_time]
                # create light on and off values for lamp
# create sunrise and set markers
day_text_split = day_group.split(":")
ymd_dayname = datetime.date(int(day_text_split[0]), int(day_text_split[1]), int(day_text_split[2]))
if use_time == "sun":
sunrise = sun.get_local_sunrise_time(ymd_dayname)
sunset = sun.get_local_sunset_time(ymd_dayname)
light_markers.append(sunrise)
light_markers.append(sunset)
duration = sunset - sunrise
print(duration)
durations.append(duration)
durations.append("")
else:
light_on = day_group + " " + light_on_time_hour + ":" + light_on_time_min + ":00"
light_off = day_group + " " + light_off_time_hour + ":" + light_off_time_min + ":00"
light_on = datetime.datetime.strptime(light_on, "%Y:%m:%d %H:%M:%S")
light_off = datetime.datetime.strptime(light_off, "%Y:%m:%d %H:%M:%S")
light_markers.append(light_on)
light_markers.append(light_off)
duration = light_off - light_on
print(duration)
durations.append(duration)
durations.append("")
# put the lists of values and dates into the dictionary of sets under the daygroup key
dictionary_of_sets[day_group]=[values_to_graph, dates_to_graph]
return dictionary_of_sets, light_markers, durations
# define a graph space
fig, ax = plt.subplots(figsize=(size_h, size_v))
if not color_cycle == 'false' and not color_cycle.strip() == '':
ax.set_prop_cycle(color=color_cycle)
# cycle through and make plot
for x in list_of_datasets:
date_list = x[0]
value_list = x[1]
key_list = x[2]
dictionary_of_sets, light_markers, durations = make_dict_of_sets(date_list, value_list, key_list)
print(len(light_markers), len(durations))
ax.plot(date_list, value_list, label=key_list[0], lw=1)
flip_color = day_color
for x in range(0, len(light_markers)-1):
pos1 = mdates.date2num(light_markers[x])
pos2 = mdates.date2num(light_markers[x+1])
ax.axvspan(pos1, pos2, color=flip_color, alpha=0.3)
text_pos = pos2
if label_duration == "true":
if not ymin == "":
label_height = float(ymin)
else:
label_height = 0
ax.text(text_pos, label_height, " " + str(durations[x]), rotation=90,va='bottom',ha='right')
if flip_color == night_color:
flip_color = day_color
else:
flip_color = night_color
#plt.axvline(x, color='darkblue', linewidth=5,alpha=0.3)
# organise the graphing area
if not major_ticks == "":
loc = plticker.MultipleLocator(base=float(major_ticks)) # this locator puts ticks at regular intervals
ax.yaxis.set_major_locator(loc)
if not minor_ticks == "":
loc = plticker.MultipleLocator(base=float(minor_ticks)) # this locator puts ticks at regular intervals
ax.yaxis.set_minor_locator(loc)
if show_grid == "true":
plt.grid(axis='y')
if show_time_period == "true":
        title_text = title_text + "\nTime Period; " + str(date_list[0].strftime("%b-%d %H:%M")) + " to " + str(date_list[-1].strftime("%b-%d %H:%M"))
plt.title(title_text)
if len(list_of_datasets) > 1:
ax.legend()
ax.xaxis_date()
fig.autofmt_xdate()
plt.ylabel(ylabel)
if not ymax == "":
plt.ylim(ymax=float(ymax))
if not ymin == "":
plt.ylim(ymin=float(ymin))
# save the graph and tidy up our workspace
plt.savefig(graph_path)
print("divided days created and saved to " + graph_path)
plt.close(fig)
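# Added usage sketch (not part of the original module): builds two days of
# fake hourly readings and renders them with the default "lamp" day/night
# settings from read_graph_options(). The output file name is a placeholder.
if __name__ == '__main__':
    import datetime
    start = datetime.datetime(2020, 1, 1)
    example_dates = [start + datetime.timedelta(hours=h) for h in range(48)]
    example_values = [18 + (h % 24) * 0.5 for h in range(48)]
    make_graph([[example_dates, example_values, ["temp"]]], "day_night_example.png",
               size_h=10, size_v=5, extra=read_graph_options())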
| gpl-3.0 |
Sh1n/AML-ALL-classifier | step6.py | 1 | 3066 | import Orange
import logging
import random
from sklearn.externals import joblib
from discretization import *
from FeatureSelector import *
from utils import *
from sklearn import svm
from sklearn import cross_validation
from sklearn.metrics import f1_score, precision_recall_fscore_support, classification_report
from sklearn.feature_extraction import DictVectorizer
import numpy as np
from sklearn import preprocessing
# Vars
baseline = .9496
K = 1
S = 2000
C = 1.5
load = False
negate = False
# Utilities
logging.basicConfig(filename='main.log',level=logging.DEBUG,format='%(levelname)s\t%(message)s')
def logmessage(message, color):
print color(message)
logging.info(message)
def copyDataset(dataset):
return Orange.data.Table(dataset)
# ============================================================================ #
boxmessage("Starting Phase 6: Final", warning)
testSet = Orange.data.Table("finaltestset.tab")
logmessage("Final Test Set loaded", info)
# Discretizer
ds = Discretizer(testSet, K, logging)
ds.load()
logmessage("Discretizer Loaded", info)
# Feature Selector
fs = FeatureSelector()
fs.load()
fs.setThreshold(S)
logmessage("Feature Selector Loaded", info)
# LabelEncoder
le = None
with open("labelencoder", "r") as in_file:
le = pickle.load(in_file)
logmessage("Label Encoder Loaded", info)
# Model
clf = joblib.load('classifier.model')
logmessage("Classifier Loaded", info)
#discretizedSet = ds.discretizeDataset(trainingSet)
# ============================================================================ #
if not load:
testSet3 = ds.discretizeDataset(testSet) # Apply Discretization
logmessage("TestSet Discretized", success)
testSet3.save("final_testset_discretized.tab")
if not load:
testSet4 = fs.select(testSet3)
logmessage("Feature Selection Applied", success)
testSet4.save("final_testset_selected.tab")
else:
testSet4 = Orange.data.Table("final_testset_selected.tab")
converted_test_data = ([le.transform([ d[f].value for f in testSet4.domain if f != testSet4.domain.class_var]) for d in testSet4])
converted_test_targets = le.transform([d[testSet4.domain.class_var].value for d in testSet4 ])
logmessage("Label Encoding Applied", success)
print converted_test_targets
logmessage("Starting Prediction Task", info)
prediction = clf.predict(converted_test_data)
if negate:
prediction = np.array([933 if p == 934 else 934 for p in prediction])
print "Prediction: \t", prediction
print classification_report(converted_test_targets, prediction)
p, r, f1, support = precision_recall_fscore_support(converted_test_targets, prediction,average="micro")
print p
print r
print f1
print support
# Save scores
#scores = open('scores', 'a')
#scores.write("%s\n" % (np.average(f1)))
#scores.close()
# Save scores
scores = open('finalscores', 'a')
scores.write("%s\n" % (np.average(f1)))
scores.close()
f1_avg = np.average(f1)
logmessage("Average F1(Over 2 classes): %s" % f1_avg, info)
if f1_avg > baseline:
logmessage("Performance Increased", success)
else:
logmessage("Performance Decreased", error)
| gpl-2.0 |
FelipeTulio/Algoritmos_Geneticos_Python | ag_caixa_preta.py | 1 | 13887 | #-*- coding: utf-8 -*-
"""
Created on Sat Sep 14 14:42:15 2013
@Authors:
Cleidson dos Santos Souza
Felipe Túlio de Castro
Guilherme Macedo Mendes
Rennan Aquino Neri
"""
# Import and alias the packages used by the script
import numpy as np
import matplotlib as mpl
import os
from scipy.stats.mstats import mquantiles
#Clear the Python shell screen
os.system('cls')
#Build the script menu - header
print u'Universidade Estadual de Montes Claros (Unimontes)'
print u'Especialização em Engenharia de Sistemas'
print u'Disciplina: Algoritmos Genéticos'
print u'Professor: Renato Dourado Maia'
print u'Problema da Caixa Preta - Trabalho Prático \n'
print u'Equipe de desenvolvimento: \nCleidson dos Santos Souza; Felipe Túlio de Castro; Guilherme Macedo Mendes; Rennan Aquino Neri. \n'
#Parameter configuration menu
print u'MENU DE CONFIGURAÇÃO DO SISTEMA \n'
print u'Escolha o tipo de seleção que deseja utilizar: (1) Roleta (2) Torneio'
Op_Selecao = input('Resposta -> ')
print u'Escolha o tipo de cruzamento que deseja utilizar: (3) Ponto de corte (4) Uniforme'
Op_Cruzamento = input('Resposta -> ')
print u'Escolha o tipo de mutação que deseja utilizar: (5) Bit (6) Bit a Bit '
Op_Mutacao = input('Resposta -> ')
print u'Deseja utilizar o elitismo no algoritmo: (7) Sim (8) Não'
Op_Elitismo = input('Resposta -> ')
print u'Defina a taxa de cruzamento: '
Prob_Cruzamento = input('Resposta -> ')
print u'Defina a taxa de mutação: '
Taxa_Mutacao = input('Resposta -> ')
print u'Defina a quantidade de indivíduos da população: '
Tam_Populacao = input('Resposta -> ')
print u'Defina a quantidade de gerações: '
Maximo_Geracoes = input('Resposta -> ')
#Genetic algorithm initialization
print '\n'
print u'INICIALIZAÇÃO DO ALGORITMO GENÉTICO\n'
#Declare the initial variables
Num_Execucoes = 100
Tam_Individuo = 36
Maior_Fitness = 0
Menor_Fitness = 0
Num_sucessos = 0
Solucao_Otima = 0
Execucao = 1
Linha = 0
Coluna = 0
Somatoria_Fitness = 0
Populacao_Selecionada = np.zeros((Tam_Populacao, Tam_Individuo))
Populacao_Cruzamento = np.zeros((Tam_Populacao, Tam_Individuo))
Populacao_Mutacao = np.zeros((Tam_Populacao, Tam_Individuo))
Mascara_Bits = np.zeros((1,Tam_Individuo))
Media_Fitness_Geracao = np.zeros((Maximo_Geracoes,1))
Melhor_Fitness_Geracao = np.zeros((Maximo_Geracoes,1))
Melhor_individuo_Geracao = np.zeros((Maximo_Geracoes,Tam_Individuo))
Maior_Fitness_Execucao = np.zeros((Num_Execucoes,1))
Menor_Fitness_Execucao = np.zeros((Num_Execucoes,1))
Media_Fitness_Execucao = np.zeros((Num_Execucoes,1))
#Loop over the executions requested for the assignment
while (Execucao <= Num_Execucoes):
Populacao_Inicial = np.ones((Tam_Populacao, Tam_Individuo))
    #Generate the initial population
for Linha in range(Tam_Populacao):
for Coluna in range(Tam_Individuo):
Populacao_Inicial[Linha,Coluna] = round(np.random.rand(1,1))
    # Loop over the generations that evolve the genetic algorithm
Geracao = 1
Media_Fitness_Geracao = np.zeros((Maximo_Geracoes+1,1))
Melhor_Fitness_Geracao = np.zeros((Maximo_Geracoes+1,1))
Melhor_Individuo = np.zeros((Maximo_Geracoes+1,Tam_Individuo))
while (Geracao <= Maximo_Geracoes):
        # Compute the fitness of each individual
Fitness = np.zeros((Tam_Populacao,1))
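        # Black-box objective for this assignment: a constant of 9 plus a fixed,
        # signed sum of pairwise products of the 36 bits (b01..b26 below).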
for Linha in range(Tam_Populacao):
b01 = Populacao_Inicial[Linha,1] * Populacao_Inicial[Linha,4];
b02 = Populacao_Inicial[Linha,22] * Populacao_Inicial[Linha,13];
b03 = Populacao_Inicial[Linha,23] * Populacao_Inicial[Linha,3];
b04 = Populacao_Inicial[Linha,20] * Populacao_Inicial[Linha,9];
b05 = Populacao_Inicial[Linha,35] * Populacao_Inicial[Linha,14];
b06 = Populacao_Inicial[Linha,10] * Populacao_Inicial[Linha,25];
b07 = Populacao_Inicial[Linha,15] * Populacao_Inicial[Linha,16];
b08 = Populacao_Inicial[Linha,2] * Populacao_Inicial[Linha,32];
b09 = Populacao_Inicial[Linha,27] * Populacao_Inicial[Linha,18];
b10 = Populacao_Inicial[Linha,11] * Populacao_Inicial[Linha,33];
b11 = Populacao_Inicial[Linha,30] * Populacao_Inicial[Linha,31];
b12 = Populacao_Inicial[Linha,21] * Populacao_Inicial[Linha,24];
b13 = Populacao_Inicial[Linha,34] * Populacao_Inicial[Linha,26];
b14 = Populacao_Inicial[Linha,28] * Populacao_Inicial[Linha,6];
b15 = Populacao_Inicial[Linha,7] * Populacao_Inicial[Linha,12];
b16 = Populacao_Inicial[Linha,5] * Populacao_Inicial[Linha,8];
b17 = Populacao_Inicial[Linha,17] * Populacao_Inicial[Linha,19];
b18 = Populacao_Inicial[Linha,0] * Populacao_Inicial[Linha,29];
b19 = Populacao_Inicial[Linha,22] * Populacao_Inicial[Linha,3];
b20 = Populacao_Inicial[Linha,20] * Populacao_Inicial[Linha,14];
b21 = Populacao_Inicial[Linha,25] * Populacao_Inicial[Linha,15];
b22 = Populacao_Inicial[Linha,30] * Populacao_Inicial[Linha,11];
b23 = Populacao_Inicial[Linha,24] * Populacao_Inicial[Linha,18];
b24 = Populacao_Inicial[Linha,6] * Populacao_Inicial[Linha,7];
b25 = Populacao_Inicial[Linha,8] * Populacao_Inicial[Linha,17];
b26 = Populacao_Inicial[Linha,0] * Populacao_Inicial[Linha,32];
Fitness[Linha, 0] = 9 + b01 - b02 + b03 - b04 + b05 - b06 + b07 + b08 + b09 + b10 \
- b11 - b12 + b13 - b14 + b15 - b16 + b17 - b18 + b19 + b20 + b21 + b22 \
+ b23 + b24 + b25 + b26
#----------------------------------------------------------------------------------------------
        # Elitism: keep the best individual found so far in the population
if Op_Elitismo == 7:
Media_Fitness_Geracao[Geracao,0] = np.mean(Fitness)
#valor = np.max(Fitness)
idc = np.argmax(Fitness)
Melhor_Individuo[Geracao,:] = Populacao_Inicial[idc,:]
Melhor_Fitness_Geracao[Geracao,0] = Fitness[idc,0]
if Geracao > 1:
                if Melhor_Fitness_Geracao[Geracao,0] < Melhor_Fitness_Geracao[Geracao-1,0]:
Melhor_Individuo[Geracao,:] = Melhor_Individuo[Geracao-1,:]
Melhor_Fitness_Geracao[Geracao,0] = Melhor_Fitness_Geracao[Geracao-1,0]
Populacao_Inicial[1,:] = Melhor_Individuo[Geracao,:]
Fitness[1,0] = Melhor_Fitness_Geracao[Geracao,0]
#----------------------------------------------------------------------------------------------
        # Selection operators
if Op_Selecao == 1:
Linha_Selecao = 2
Somatoria_Fitness = np.sum(Fitness)
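            # Roulette-wheel selection: keep the first individual whose cumulative
            # fitness crosses a threshold drawn uniformly in [0, total fitness).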
for Linha_Selecao in range(Tam_Populacao):
                # round(np.random.random()) only ever gave 0 or 1, so the first
                # individual was always selected.
                Num_Randomico = np.random.random() * Somatoria_Fitness
Acumulo = 0
j = 1
for j in range(Tam_Populacao):
Acumulo = Acumulo + Fitness[j,0]
if Acumulo >= Num_Randomico:
Populacao_Selecionada[Linha_Selecao,:] = Populacao_Inicial[j,:]
break
elif Op_Selecao == 2:
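            # Binary tournament: draw two individuals at random and keep the fitter
            # one (ties go to the second competitor).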
for Linha in range(Tam_Populacao):
                # round(np.random.random()) only ever indexed individuals 0 and 1.
                num1 = np.random.randint(0, Tam_Populacao)
                num2 = np.random.randint(0, Tam_Populacao)
if Fitness[num1,0] > Fitness[num2,0]:
Populacao_Selecionada[Linha,:] = Populacao_Inicial[num1,:]
elif Fitness[num1,0] < Fitness[num2,0]:
Populacao_Selecionada[Linha,:] = Populacao_Inicial[num2,:]
elif Fitness[num1,0] == Fitness[num2,0]:
Populacao_Selecionada[Linha,:] = Populacao_Inicial[num2,:]
#----------------------------------------------------------------------------------------------
        # Crossover operators
if Op_Cruzamento == 3:
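            # One-point crossover: the bit mask is 0 before the cut point and 1 after
            # it, so the offspring exchange the tail segments of the two parents.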
Ponto_Corte = np.random.randint(1,Tam_Individuo)
for i in range(Tam_Individuo):
if i < Ponto_Corte:
Mascara_Bits[0,i] = 0
elif i >= Ponto_Corte:
Mascara_Bits[0,i] = 1
            cont = 0
            while (cont < Tam_Populacao):
                # Draw a fresh pair of distinct parents for every pair of offspring;
                # without re-drawing here the first random pair would be reused for
                # the whole population.
                Primeiro_Individuo = np.random.randint(0, Tam_Populacao)
                Segundo_Individuo = np.random.randint(0, Tam_Populacao)
                while (Primeiro_Individuo == Segundo_Individuo):
                    Segundo_Individuo = np.random.randint(0, Tam_Populacao)
Taxa_Cruzamento = np.random.random()
if Taxa_Cruzamento <= Prob_Cruzamento:
for j in range (Tam_Individuo):
if Mascara_Bits[0,j] == 0:
Populacao_Cruzamento[cont,j] = Populacao_Selecionada[Primeiro_Individuo,j]
Populacao_Cruzamento[cont+1,j] = Populacao_Selecionada[Segundo_Individuo,j]
elif Mascara_Bits[0,j] == 1:
Populacao_Cruzamento[cont,j]=Populacao_Selecionada[Segundo_Individuo,j]
Populacao_Cruzamento[cont+1,j]=Populacao_Selecionada[Primeiro_Individuo,j]
elif Taxa_Cruzamento > Prob_Cruzamento:
Populacao_Cruzamento[cont] = Populacao_Selecionada[Primeiro_Individuo]
Populacao_Cruzamento[cont+1] = Populacao_Selecionada[Segundo_Individuo]
cont = cont + 2
elif Op_Cruzamento == 4:
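            # Uniform crossover: each position of the mask is drawn at random, so every
            # gene is inherited from one parent or the other with equal probability.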
for i in range(Tam_Individuo):
Mascara_Bits[0,i] = np.random.choice([0,1])
# print Mascara_Bits
            cont = 0
            while (cont < Tam_Populacao):
                # Draw a fresh pair of distinct parents for every pair of offspring;
                # without re-drawing here the first random pair would be reused for
                # the whole population.
                Primeiro_Individuo = np.random.randint(0,Tam_Populacao)
                Segundo_Individuo = np.random.randint(0,Tam_Populacao)
                while (Primeiro_Individuo == Segundo_Individuo):
                    Segundo_Individuo = np.random.randint(0,Tam_Populacao)
Taxa_Cruzamento = np.random.random()
if Taxa_Cruzamento <= Prob_Cruzamento:
for j in range(Tam_Individuo):
if Mascara_Bits[0,j] == 0:
Populacao_Cruzamento[cont,j] = Populacao_Selecionada[Primeiro_Individuo,j]
Populacao_Cruzamento[cont+1,j] = Populacao_Selecionada[Segundo_Individuo,j]
elif Mascara_Bits[0,j] == 1:
Populacao_Cruzamento[cont,j]=Populacao_Selecionada[Segundo_Individuo,j]
Populacao_Cruzamento[cont+1,j]=Populacao_Selecionada[Primeiro_Individuo,j]
elif Taxa_Cruzamento > Prob_Cruzamento:
Populacao_Cruzamento[cont] = Populacao_Selecionada[Primeiro_Individuo]
Populacao_Cruzamento[cont+1] = Populacao_Selecionada[Segundo_Individuo]
cont = cont + 2
#---------------------------------------------------------------------------------------------
        # Mutation operators
if Op_Mutacao == 5:
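            # Single-bit mutation: each individual may have one randomly chosen bit flipped.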
Populacao_Mutacao = Populacao_Cruzamento
i = 0
while (i < Tam_Populacao):
                # The original drew an integer in [0, 36) as the mutation test and a
                # bit index of only 0 or 1; use a uniform draw and a random bit position.
                Prob_Mutacao = np.random.random()
                if Prob_Mutacao <= Taxa_Mutacao:
                    Bit = np.random.randint(0, Tam_Individuo)
if Populacao_Mutacao[i, Bit] == 1:
Populacao_Mutacao[i, Bit] = 0
else:
Populacao_Mutacao[i, Bit] = 1
i = i + 1
elif Op_Mutacao == 6:
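            # Bit-by-bit mutation: every bit of every individual may be flipped independently.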
Populacao_Mutacao = Populacao_Cruzamento
i = 0
while (i < Tam_Populacao):
for j in range(Tam_Individuo):
                    # Test each bit independently against Taxa_Mutacao using a uniform draw.
                    Prob_Mutacao = np.random.random()
                    if Prob_Mutacao <= Taxa_Mutacao:
if Populacao_Mutacao[i, j] == 1:
Populacao_Mutacao[i,j] = 0
else:
Populacao_Mutacao[i, j] = 1
i = i + 1
#----------------------------------------------------------------------------------------------
Populacao_Inicial[:,:] = Populacao_Mutacao[:,:]
Geracao = Geracao + 1
#----------------------------------------------------------------------------------------------
    # Compute the requested result statistics for this run
Maior_Fitness_Execucao[Execucao-1,0] = np.max(Fitness)
Menor_Fitness_Execucao[Execucao-1,0] = np.min(Fitness)
Media_Fitness_Execucao[Execucao-1,0] = np.mean(Fitness)
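    # A run counts as a success when the best fitness reaches 27, the target value
    # this assignment treats as the optimum of the black-box function.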
if np.max(Fitness) == 27:
Num_sucessos = Num_sucessos + 1
Execucao = Execucao + 1
print u'ALGORITMO GENÉTICO FINALIZADO. GERANDO OS RESULTADOS...\n'
print u'Quantidade de sucessos: ' + str(Num_sucessos)
print u'Maior valor de Fitness: ' + str(np.max(Maior_Fitness_Execucao))
print u'Menor valor de Fitness: ' + str(np.min(Menor_Fitness_Execucao))
print u'Valor Médio dos Fitness: ' + str(np.mean(Media_Fitness_Execucao))
print u'Desvio padrão: ' + str(np.std(Maior_Fitness_Execucao))
print u'Primeiro, segundo e terceiro quartil: ' + str(mquantiles(Maior_Fitness_Execucao))
mpl.pyplot.boxplot(Maior_Fitness_Execucao)
mpl.pyplot.title(u'Melhores Fitness obtidos nas 100 execuções')
#Teste de Wilcoxon
#Teste_Wilcoxon = sp.stats.ranksums() | gpl-2.0 |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/misc/longshort.py | 6 | 1656 | """
Illustrate the rec array utility functions by loading prices from a
csv file, computing the daily returns, appending the results to the
record arrays, joining on date
"""
import urllib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
# grab the price data off yahoo
u1 = urllib.urlretrieve('http://ichart.finance.yahoo.com/table.csv?s=AAPL&d=9&e=14&f=2008&g=d&a=8&b=7&c=1984&ignore=.csv')
u2 = urllib.urlretrieve('http://ichart.finance.yahoo.com/table.csv?s=GOOG&d=9&e=14&f=2008&g=d&a=8&b=7&c=1984&ignore=.csv')
# load the CSV files into record arrays
r1 = mlab.csv2rec(file(u1[0]))
r2 = mlab.csv2rec(file(u2[0]))
# compute the daily returns and add these columns to the arrays
gains1 = np.zeros_like(r1.adj_close)
gains2 = np.zeros_like(r2.adj_close)
gains1[1:] = np.diff(r1.adj_close)/r1.adj_close[:-1]
gains2[1:] = np.diff(r2.adj_close)/r2.adj_close[:-1]
r1 = mlab.rec_append_fields(r1, 'gains', gains1)
r2 = mlab.rec_append_fields(r2, 'gains', gains2)
# now join them by date; the default postfixes are 1 and 2. The
# default jointype is inner so it will do an intersection of dates and
# drop the dates in AAPL which occurred before GOOG started trading in
# 2004. r1 and r2 are reverse ordered by date since Yahoo returns
# most recent first in the CSV files, but rec_join will sort by key so
# r below will be properly sorted
r = mlab.rec_join('date', r1, r2)
# long appl, short goog
g = r.gains1-r.gains2
tr = (1+g).cumprod() # the total return
# plot the return
fig, ax = plt.subplots()
ax.plot(r.date, tr)
ax.set_title('total return: long APPL, short GOOG')
ax.grid()
fig.autofmt_xdate()
plt.show()
| mit |
ilyes14/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
lin-credible/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
# Check that shuffle does not try to convert to numpy arrays with float
# dtypes can let any indexable datastructure pass-through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
sheabrown/ImageFilters | imagefilters.py | 1 | 2205 | # ====================================================================================
# Image filtering functions, tested on a FITS image taken from SkyView
# Used in the Astrophysical Machine Learning course at the University of Iowa
# https://astrophysicalmachinelearning.wordpress.com/ taught by Shea Brown
# Written by Shea Brown, [email protected], https://sheabrownastro.wordpress.com/
# =====================================================================================
import numpy as np
from astropy.io import fits as fits
import matplotlib.pyplot as plt
image = fits.open('coma_DSSred.fits')
image = np.flipud(image[0].data)
print(image.shape)
filt = (7, 7)
print(filt[0] // 2)
kernel = np.random.normal(size=filt)
maxa = np.zeros((300, 300))
mina = np.zeros((300, 300))
conv_array = np.zeros((300, 300))
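# Each filter below slides an s[0] x s[1] window over the image and replaces the
# centre pixel with the max / min / median / mean of that window; border pixels
# the window cannot cover are left at zero.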
def maxFilter(im, s):
xpix, ypix = im.shape
maxa = np.zeros(im.shape)
    for i in range(s[0] // 2, xpix - s[0] // 2):
        for j in range(s[1] // 2, ypix - s[1] // 2):
            maxa[i, j] = np.max(im[i - s[0] // 2:i + 1 + s[0] // 2, j - s[1] // 2:j + 1 + s[1] // 2])
return maxa
def minFilter(im, s):
xpix, ypix = im.shape
mina = np.zeros(im.shape)
    for i in range(s[0] // 2, xpix - s[0] // 2):
        for j in range(s[1] // 2, ypix - s[1] // 2):
            mina[i, j] = np.min(im[i - s[0] // 2:i + 1 + s[0] // 2, j - s[1] // 2:j + 1 + s[1] // 2])
return mina
def medianFilter(im, s):
xpix, ypix = im.shape
meda = np.zeros(im.shape)
    for i in range(s[0] // 2, xpix - s[0] // 2):
        for j in range(s[1] // 2, ypix - s[1] // 2):
            meda[i, j] = np.median(im[i - s[0] // 2:i + 1 + s[0] // 2, j - s[1] // 2:j + 1 + s[1] // 2])
return meda
def meanFilter(im, s):
xpix, ypix = im.shape
meana = np.zeros(im.shape)
    for i in range(s[0] // 2, xpix - s[0] // 2):
        for j in range(s[1] // 2, ypix - s[1] // 2):
            meana[i, j] = np.mean(im[i - s[0] // 2:i + 1 + s[0] // 2, j - s[1] // 2:j + 1 + s[1] // 2])
return meana
maxim = maxFilter(image, filt)
minim = minFilter(image, filt)
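# Morphological opening: erosion (min filter) followed by dilation (max filter).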
opening = maxFilter(minim, filt)
medim = medianFilter(image, filt)
meanim = meanFilter(image, filt)
plt.imshow(image - opening)
plt.show()
| mit |
fzenke/morla | scripts/compute_recommendations.py | 1 | 3418 | #!/usr/bin/python3
from __future__ import print_function
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import numpy as np
import papers.utils as utils
from sklearn.svm import LinearSVC
import gzip
import pickle
import datetime
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
import django
from django.utils import timezone
from django.db.models import Max
from django.db.models import F
from django.db.models import Q
django.setup()
from papers.models import Article, Feature, Profile, Recommendation
from compute_feature_vectors import *
min_number_of_ham = 4
consider_inactive_after_days = 60
def compute_recommendations(profile, articles, data, show_training_data=True, max_suggestions=500):
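    """Fit a linear SVM on the profile's labelled training set and store up to
    max_suggestions articles predicted as relevant as Recommendation objects."""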
logger.info("Loading training data for profile %s..."%profile)
X_train, y_train = utils.get_training_set( profile )
logger.debug("%i samples in training set (%i positive)"%(len(y_train), (y_train>0).sum()))
# See if conditions for fit are met
if X_train.shape[0] > min_number_of_ham:
logger.debug("Fitting SVM...")
svm = LinearSVC()
svm.fit(X_train, y_train)
logger.debug("Validating...")
predictions = svm.predict(X_train)
logger.debug("%f%% train accuracy"%(100*(predictions==y_train).mean()))
logger.debug("Predicting...")
predictions = svm.predict(data)
logger.debug("%f%% relevant"%(100*(predictions==1).mean()))
logger.debug("Saving recommendations...")
recommended_articles = []
for a,p in zip(articles, predictions):
if p>0:
if show_training_data:
recommended_articles.append(a)
elif not profile.ham.filter(id=a.id).exists():
recommended_articles.append(a)
if len(recommended_articles)>=max_suggestions:
break
Recommendation.objects.filter( profile=profile ).delete()
Recommendation.objects.bulk_create( [ Recommendation( profile=profile, article=a, date_added=timezone.now() ) for a in recommended_articles ] )
logger.info("Saved %i suggestions"%(len(recommended_articles)))
# Save last prediction time to profile
profile.last_prediction_run = timezone.now()
profile.save()
if __name__ == "__main__":
logger.debug("Loading user profiles...")
# get users which need updating
qres = Article.objects.all().aggregate(Max('date_added'))
min_last_time_active = timezone.now() - datetime.timedelta(consider_inactive_after_days)
profiles = Profile.objects.filter( Q(last_time_active__lte=min_last_time_active) | Q(last_prediction_run__lte=F('last_traindata_update')) | Q(last_prediction_run__lte=qres['date_added__max']) )
# profiles = Profile.objects.all()
if profiles:
logger.info("Loading articles...")
from_date = datetime.date.today() - datetime.timedelta(356)
articles = Article.objects.filter(pubdate__gte=from_date).order_by('-pubdate')
logger.debug("Loading data ...")
data = utils.get_features_from_db(articles)
for profile in profiles:
compute_recommendations(profile, articles, data)
else:
logger.debug("Nothing to do. Exiting...")
| mit |