repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
CompPhysics/ComputationalPhysics2 | doc/Programs/BoltzmannMachines/VMC/python/sampling.py | 5 | 8050 |
from sys import argv
from os import mkdir, path
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from matplotlib.font_manager import FontProperties
# Timing Decorator
def timeFunction(f):
def wrap(*args):
time1 = time.time()
ret = f(*args)
time2 = time.time()
print '%s Function Took: \t %0.3f s' % (f.func_name.title(), (time2-time1))
return ret
return wrap
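# The timing decorator above wraps a method so that every call prints the
# wall-clock time it took (measured with time.time() before and after) and
# then returns the wrapped method's result unchanged; it is applied below
# with @timeFunction.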
class dataAnalysisClass:
# General Init functions
def __init__(self, fileName, size=0):
self.inputFileName = fileName
self.loadData(size)
self.createOutputFolder()
self.avg = np.average(self.data)
self.var = np.var(self.data)
self.std = np.std(self.data)
def loadData(self, size=0):
if size != 0:
with open(self.inputFileName) as inputFile:
self.data = np.zeros(size)
for x in xrange(size):
self.data[x] = float(next(inputFile))
else:
self.data = np.loadtxt(self.inputFileName)
# Statistical Analysis with Multiple Methods
def runAllAnalyses(self):
if len(self.data) <= 100000:
print "Autocorrelation..."
self.autocorrelation()
print "Bootstrap..."
self.bootstrap()
print "Jackknife..."
self.jackknife()
print "Blocking..."
self.blocking()
# Standard Autocorrelation
@timeFunction
def autocorrelation(self):
self.acf = np.zeros(len(self.data)/2)
for k in range(0, len(self.data)/2):
self.acf[k] = np.corrcoef(np.array([self.data[0:len(self.data)-k], \
self.data[k:len(self.data)]]))[0,1]
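# The loop above builds the sample autocorrelation function for lags k up to
# N/2: acf[k] = corr(x[0:N-k], x[k:N]), i.e. the Pearson correlation between
# the series and a copy of itself shifted by k steps.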
# Bootstrap
@timeFunction
def bootstrap(self, nBoots = 1000):
bootVec = np.zeros(nBoots)
for k in range(0,nBoots):
bootVec[k] = np.average(np.random.choice(self.data, len(self.data)))
self.bootAvg = np.average(bootVec)
self.bootVar = np.var(bootVec)
self.bootStd = np.std(bootVec)
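# Each bootstrap replicate above resamples the full dataset with replacement
# (np.random.choice) and stores its mean; bootStd, the spread of those
# resampled means, is thus an estimate of the standard error of the mean.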
# Jackknife
@timeFunction
def jackknife(self):
jackknVec = np.zeros(len(self.data))
for k in range(0,len(self.data)):
jackknVec[k] = np.average(np.delete(self.data, k))
self.jackknAvg = self.avg - (len(self.data) - 1) * (np.average(jackknVec) - self.avg)
self.jackknVar = float(len(self.data) - 1) * np.var(jackknVec)
self.jackknStd = np.sqrt(self.jackknVar)
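# The jackknife estimates above use the standard leave-one-out formulas:
# with N samples and theta_i the mean computed with sample i removed,
#   jackknAvg = avg - (N - 1) * (mean(theta_i) - avg)   (bias-corrected mean)
#   jackknVar = (N - 1) * var(theta_i)                  (jackknife variance)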
# Blocking
@timeFunction
def blocking(self, blockSizeMax = 500):
blockSizeMin = 1
self.blockSizes = []
self.meanVec = []
self.varVec = []
for i in range(blockSizeMin, blockSizeMax):
if(len(self.data) % i != 0):
pass  # NOTE: 'continue' is commented out here, so block sizes that do not divide len(data) evenly are still processed
blockSize = i
meanTempVec = []
varTempVec = []
startPoint = 0
endPoint = blockSize
while endPoint <= len(self.data):
meanTempVec.append(np.average(self.data[startPoint:endPoint]))
startPoint = endPoint
endPoint += blockSize
mean, var = np.average(meanTempVec), np.var(meanTempVec)/len(meanTempVec)
self.meanVec.append(mean)
self.varVec.append(var)
self.blockSizes.append(blockSize)
self.blockingAvg = np.average(self.meanVec[-200:])
self.blockingVar = (np.average(self.varVec[-200:]))
self.blockingStd = np.sqrt(self.blockingVar)
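# Blocking above averages the data in non-overlapping blocks of increasing
# size and records the variance of the block means divided by the number of
# blocks (an estimate of the variance of the overall mean). For correlated
# data this quantity grows with block size until the blocks exceed the
# correlation time and it plateaus; blockingAvg and blockingVar are taken as
# the average over the largest block sizes (the last 200 entries).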
# Plot of Data, Autocorrelation Function and Histogram
def plotAll(self):
self.createOutputFolder()
if len(self.data) <= 100000:
self.plotAutocorrelation()
self.plotData()
self.plotHistogram()
self.plotBlocking()
# Create Output Plots Folder
def createOutputFolder(self):
self.outName = self.inputFileName[:-4]
if not path.exists(self.outName):
mkdir(self.outName)
# Plot the Dataset, Mean and Std
def plotData(self):
# Far away plot
font = {'fontname':'serif'}
plt.plot(range(0, len(self.data)), self.data, 'r-', linewidth=1)
plt.plot([0, len(self.data)], [self.avg, self.avg], 'b-', linewidth=1)
plt.plot([0, len(self.data)], [self.avg + self.std, self.avg + self.std], 'g--', linewidth=1)
plt.plot([0, len(self.data)], [self.avg - self.std, self.avg - self.std], 'g--', linewidth=1)
plt.ylim(self.avg - 5*self.std, self.avg + 5*self.std)
plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%.4f'))
plt.xlim(0, len(self.data))
plt.ylabel(self.outName.title() + ' Monte Carlo Evolution', **font)
plt.xlabel('Monte Carlo History', **font)
plt.title(self.outName.title(), **font)
plt.savefig(self.outName + "/data.eps")
plt.savefig(self.outName + "/data.png")
plt.clf()
# Plot Histogram of Dataset and Gaussian around it
def plotHistogram(self):
binNumber = 50
font = {'fontname':'serif'}
count, bins, ignore = plt.hist(self.data, bins=np.linspace(self.avg - 5*self.std, self.avg + 5*self.std, binNumber))
plt.plot([self.avg, self.avg], [0,np.max(count)+10], 'b-', linewidth=1)
plt.ylim(0,np.max(count)+10)
plt.ylabel(self.outName.title() + ' Histogram', **font)
plt.xlabel(self.outName.title() , **font)
plt.title('Counts', **font)
#gaussian
norm = 0
for i in range(0,len(bins)-1):
norm += (bins[i+1]-bins[i])*count[i]
plt.plot(bins, norm/(self.std * np.sqrt(2 * np.pi)) * np.exp( - (bins - self.avg)**2 / (2 * self.std**2) ), linewidth=1, color='r')
plt.savefig(self.outName + "/hist.eps")
plt.savefig(self.outName + "/hist.png")
plt.clf()
# Plot the Autocorrelation Function
def plotAutocorrelation(self):
font = {'fontname':'serif'}
plt.plot(range(1, len(self.data)/2), self.acf[1:], 'r-')
plt.ylim(-1, 1)
plt.xlim(0, len(self.data)/2)
plt.ylabel('Autocorrelation Function', **font)
plt.xlabel('Lag', **font)
plt.title('Autocorrelation', **font)
plt.savefig(self.outName + "/autocorrelation.eps")
plt.savefig(self.outName + "/autocorrelation.png")
plt.clf()
def plotBlocking(self):
font = {'fontname':'serif'}
plt.plot(self.blockSizes, self.varVec, 'r-')
plt.ylabel('Variance', **font)
plt.xlabel('Block Size', **font)
plt.title('Blocking', **font)
plt.savefig(self.outName + "/blocking.eps")
plt.savefig(self.outName + "/blocking.png")
plt.clf()
# Print Stuff to the Terminal
def printOutput(self):
print "\nSample Size: \t", len(self.data)
print "\n=========================================\n"
print "Sample Average: \t", self.avg
print "Sample Variance:\t", self.var
print "Sample Std: \t", self.std
print "\n=========================================\n"
print "Bootstrap Average: \t", self.bootAvg
print "Bootstrap Variance:\t", self.bootVar
print "Bootstrap Error: \t", self.bootStd
print "\n=========================================\n"
print "Jackknife Average: \t", self.jackknAvg
print "Jackknife Variance:\t", self.jackknVar
print "Jackknife Error: \t", self.jackknStd
print "\n=========================================\n"
print "Blocking Average: \t", self.blockingAvg
print "Blocking Variance:\t", self.blockingVar
print "Blocking Error: \t", self.blockingStd, "\n"
# Initialize the class
if len(argv) > 2:
dataAnalysis = dataAnalysisClass(argv[1], int(argv[2]))
else:
dataAnalysis = dataAnalysisClass(argv[1])
# Run Analyses
dataAnalysis.runAllAnalyses()
# Plot the data
dataAnalysis.plotAll()
# Print Some Output
dataAnalysis.printOutput()
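# A minimal usage sketch (the file name below is only a placeholder, not part
# of the original script): the first argument is a text file with one sample
# per line and the optional second argument limits how many lines are read.
#
#   python sampling.py energies.dat 100000
#
# Plots are written to a folder named after the input file (minus its
# extension) and the summary statistics are printed to the terminal.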
| cc0-1.0 |
srowen/spark | python/pyspark/pandas/tests/test_series.py | 9 | 118972 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from collections import defaultdict
from distutils.version import LooseVersion
import inspect
from itertools import product
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pyspark.ml.linalg import SparseVector
from pyspark import pandas as ps
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.missing.series import MissingPandasLikeSeries
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
class SeriesTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pser(self):
return pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
@property
def psser(self):
return ps.from_pandas(self.pser)
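# The two fixtures above give each test a plain pandas Series (pser) and its
# pandas-on-Spark counterpart (psser) built from the same data; the tests
# below run an operation on both and compare the results with assert_eq.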
def test_series_ops(self):
pser = self.pser
psser = self.psser
self.assert_eq(psser + 1 + 10 * psser, pser + 1 + 10 * pser)
self.assert_eq(psser + 1 + 10 * psser.index, pser + 1 + 10 * pser.index)
self.assert_eq(psser.index + 1 + 10 * psser, pser.index + 1 + 10 * pser)
def test_series_tuple_name(self):
pser = self.pser
pser.name = ("x", "a")
psser = ps.from_pandas(pser)
self.assert_eq(psser, pser)
self.assert_eq(psser.name, pser.name)
pser.name = ("y", "z")
psser.name = ("y", "z")
self.assert_eq(psser, pser)
self.assert_eq(psser.name, pser.name)
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
s = ps.range(10)["id"]
s.__repr__()
s.rename("a", inplace=True)
self.assertEqual(s.__repr__(), s.rename("a").__repr__())
def _check_extension(self, psser, pser):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psser, pser, check_exact=False)
self.assertTrue(isinstance(psser.dtype, extension_dtypes))
else:
self.assert_eq(psser, pser)
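# Helper above: for pandas versions in [1.1, 1.2.2) the comparison is done
# with check_exact=False while still asserting that the result carries an
# extension dtype; for other pandas versions a plain assert_eq is used.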
def test_empty_series(self):
pser_a = pd.Series([], dtype="i1")
pser_b = pd.Series([], dtype="str")
self.assert_eq(ps.from_pandas(pser_a), pser_a)
psser_b = ps.from_pandas(pser_b)
self.assert_eq(psser_b, pser_b)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(ps.from_pandas(pser_a), pser_a)
self.assert_eq(ps.from_pandas(pser_b), pser_b)
def test_all_null_series(self):
pser_a = pd.Series([None, None, None], dtype="float64")
pser_b = pd.Series([None, None, None], dtype="str")
self.assert_eq(ps.from_pandas(pser_a), pser_a)
psser_b = ps.from_pandas(pser_b)
self.assert_eq(psser_b, pser_b)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(ps.from_pandas(pser_a), pser_a)
self.assert_eq(ps.from_pandas(pser_b), pser_b)
def test_head(self):
psser = self.psser
pser = self.pser
self.assert_eq(psser.head(3), pser.head(3))
self.assert_eq(psser.head(0), pser.head(0))
self.assert_eq(psser.head(-3), pser.head(-3))
self.assert_eq(psser.head(-10), pser.head(-10))
def test_last(self):
with self.assertRaises(TypeError):
self.psser.last("1D")
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pser = pd.Series([1, 2, 3, 4], index=index)
psser = ps.from_pandas(pser)
self.assert_eq(psser.last("1D"), pser.last("1D"))
def test_first(self):
with self.assertRaises(TypeError):
self.psser.first("1D")
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pser = pd.Series([1, 2, 3, 4], index=index)
psser = ps.from_pandas(pser)
self.assert_eq(psser.first("1D"), pser.first("1D"))
def test_rename(self):
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
psser = ps.from_pandas(pser)
pser.name = "renamed"
psser.name = "renamed"
self.assertEqual(psser.name, "renamed")
self.assert_eq(psser, pser)
pser.name = None
psser.name = None
self.assertEqual(psser.name, None)
self.assert_eq(psser, pser)
pidx = pser.index
psidx = psser.index
pidx.name = "renamed"
psidx.name = "renamed"
self.assertEqual(psidx.name, "renamed")
self.assert_eq(psidx, pidx)
expected_error_message = "Series.name must be a hashable type"
with self.assertRaisesRegex(TypeError, expected_error_message):
psser.name = ["renamed"]
with self.assertRaisesRegex(TypeError, expected_error_message):
psser.name = ["0", "1"]
with self.assertRaisesRegex(TypeError, expected_error_message):
ps.Series([1, 2, 3], name=["0", "1"])
def test_rename_method(self):
# Series name
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
psser = ps.from_pandas(pser)
self.assert_eq(psser.rename("y"), pser.rename("y"))
self.assertEqual(psser.name, "x") # no mutation
self.assert_eq(psser.rename(), pser.rename())
self.assert_eq((psser.rename("y") + 1).head(), (pser.rename("y") + 1).head())
psser.rename("z", inplace=True)
pser.rename("z", inplace=True)
self.assertEqual(psser.name, "z")
self.assert_eq(psser, pser)
expected_error_message = "Series.name must be a hashable type"
with self.assertRaisesRegex(TypeError, expected_error_message):
psser.rename(["0", "1"])
# Series index
# pser = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x')
# psser = ps.from_pandas(s)
# TODO: index
# res = psser.rename(lambda x: x ** 2)
# self.assert_eq(res, pser.rename(lambda x: x ** 2))
# res = psser.rename(pser)
# self.assert_eq(res, pser.rename(pser))
# res = psser.rename(psser)
# self.assert_eq(res, pser.rename(pser))
# res = psser.rename(lambda x: x**2, inplace=True)
# self.assertis(res, psser)
# s.rename(lambda x: x**2, inplace=True)
# self.assert_eq(psser, pser)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
pser = pd.Series([1.0, 2.0, 3.0], index=index, name="name")
psser = ps.from_pandas(pser)
self.assert_eq(
pser.rename_axis("index2").sort_index(),
psser.rename_axis("index2").sort_index(),
)
self.assert_eq(
(pser + 1).rename_axis("index2").sort_index(),
(psser + 1).rename_axis("index2").sort_index(),
)
pser2 = pser.copy()
psser2 = psser.copy()
pser2.rename_axis("index2", inplace=True)
psser2.rename_axis("index2", inplace=True)
self.assert_eq(pser2.sort_index(), psser2.sort_index())
self.assertRaises(ValueError, lambda: psser.rename_axis(["index2", "index3"]))
self.assertRaises(TypeError, lambda: psser.rename_axis(mapper=["index2"], index=["index3"]))
# index/columns parameters and dict_like/functions mappers introduced in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assert_eq(
pser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index(),
psser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index(),
)
self.assert_eq(
pser.rename_axis(index=str.upper).sort_index(),
psser.rename_axis(index=str.upper).sort_index(),
)
else:
expected = psser
expected.index.name = "index2"
result = psser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index()
self.assert_eq(expected, result)
expected = psser
expected.index.name = "INDEX"
result = psser.rename_axis(index=str.upper).sort_index()
self.assert_eq(expected, result)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
pser = pd.Series([1.0, 2.0, 3.0], index=index, name="name")
psser = ps.from_pandas(pser)
self.assert_eq(
pser.rename_axis(["index3", "index4"]).sort_index(),
psser.rename_axis(["index3", "index4"]).sort_index(),
)
self.assertRaises(ValueError, lambda: psser.rename_axis(["index3", "index4", "index5"]))
# index/columns parameters and dict_like/functions mappers introduced in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assert_eq(
pser.rename_axis(
index={"index1": "index3", "index2": "index4", "missing": "index5"}
).sort_index(),
psser.rename_axis(
index={"index1": "index3", "index2": "index4", "missing": "index5"}
).sort_index(),
)
self.assert_eq(
pser.rename_axis(index=str.upper).sort_index(),
psser.rename_axis(index=str.upper).sort_index(),
)
else:
expected = psser
expected.index.names = ["index3", "index4"]
result = psser.rename_axis(
index={"index1": "index3", "index2": "index4", "missing": "index5"}
).sort_index()
self.assert_eq(expected, result)
expected.index.names = ["INDEX1", "INDEX2"]
result = psser.rename_axis(index=str.upper).sort_index()
self.assert_eq(expected, result)
def test_or(self):
pdf = pd.DataFrame(
{
"left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
"right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["left"] | psdf["right"], pdf["left"] | pdf["right"])
self.assert_eq(psdf["left"] | True, pdf["left"] | True)
self.assert_eq(psdf["left"] | False, pdf["left"] | False)
self.assert_eq(psdf["left"] | None, pdf["left"] | None)
self.assert_eq(True | psdf["right"], True | pdf["right"])
self.assert_eq(False | psdf["right"], False | pdf["right"])
self.assert_eq(None | psdf["right"], None | pdf["right"])
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_or_extenstion_dtypes(self):
pdf = pd.DataFrame(
{
"left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
"right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
}
).astype("boolean")
psdf = ps.from_pandas(pdf)
self._check_extension(psdf["left"] | psdf["right"], pdf["left"] | pdf["right"])
self._check_extension(psdf["left"] | True, pdf["left"] | True)
self._check_extension(psdf["left"] | False, pdf["left"] | False)
self._check_extension(psdf["left"] | pd.NA, pdf["left"] | pd.NA)
self._check_extension(True | psdf["right"], True | pdf["right"])
self._check_extension(False | psdf["right"], False | pdf["right"])
self._check_extension(pd.NA | psdf["right"], pd.NA | pdf["right"])
def test_and(self):
pdf = pd.DataFrame(
{
"left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
"right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["left"] & psdf["right"], pdf["left"] & pdf["right"])
self.assert_eq(psdf["left"] & True, pdf["left"] & True)
self.assert_eq(psdf["left"] & False, pdf["left"] & False)
self.assert_eq(psdf["left"] & None, pdf["left"] & None)
self.assert_eq(True & psdf["right"], True & pdf["right"])
self.assert_eq(False & psdf["right"], False & pdf["right"])
self.assert_eq(None & psdf["right"], None & pdf["right"])
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_and_extenstion_dtypes(self):
pdf = pd.DataFrame(
{
"left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
"right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
}
).astype("boolean")
psdf = ps.from_pandas(pdf)
self._check_extension(psdf["left"] & psdf["right"], pdf["left"] & pdf["right"])
self._check_extension(psdf["left"] & True, pdf["left"] & True)
self._check_extension(psdf["left"] & False, pdf["left"] & False)
self._check_extension(psdf["left"] & pd.NA, pdf["left"] & pd.NA)
self._check_extension(True & psdf["right"], True & pdf["right"])
self._check_extension(False & psdf["right"], False & pdf["right"])
self._check_extension(pd.NA & psdf["right"], pd.NA & pdf["right"])
def test_to_numpy(self):
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
psser = ps.from_pandas(pser)
self.assert_eq(psser.to_numpy(), pser.values)
def test_isin(self):
pser = pd.Series(["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal")
psser = ps.from_pandas(pser)
self.assert_eq(psser.isin(["cow", "lama"]), pser.isin(["cow", "lama"]))
self.assert_eq(psser.isin(np.array(["cow", "lama"])), pser.isin(np.array(["cow", "lama"])))
self.assert_eq(psser.isin({"cow"}), pser.isin({"cow"}))
pser = pd.Series([np.int64(1), np.int32(1), 1])
psser = ps.from_pandas(pser)
self.assert_eq(psser.isin([np.int64(1)]), pser.isin([np.int64(1)]))
msg = "only list-like objects are allowed to be passed to isin()"
with self.assertRaisesRegex(TypeError, msg):
psser.isin(1)
def test_drop_duplicates(self):
pdf = pd.DataFrame({"animal": ["lama", "cow", "lama", "beetle", "lama", "hippo"]})
psdf = ps.from_pandas(pdf)
pser = pdf.animal
psser = psdf.animal
self.assert_eq(psser.drop_duplicates().sort_index(), pser.drop_duplicates().sort_index())
self.assert_eq(
psser.drop_duplicates(keep="last").sort_index(),
pser.drop_duplicates(keep="last").sort_index(),
)
# inplace
psser.drop_duplicates(keep=False, inplace=True)
pser.drop_duplicates(keep=False, inplace=True)
self.assert_eq(psser.sort_index(), pser.sort_index())
self.assert_eq(psdf, pdf)
def test_reindex(self):
index = ["A", "B", "C", "D", "E"]
pser = pd.Series([1.0, 2.0, 3.0, 4.0, None], index=index, name="x")
psser = ps.from_pandas(pser)
self.assert_eq(pser, psser)
self.assert_eq(
pser.reindex(["A", "B"]).sort_index(),
psser.reindex(["A", "B"]).sort_index(),
)
self.assert_eq(
pser.reindex(["A", "B", "2", "3"]).sort_index(),
psser.reindex(["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pser.reindex(["A", "E", "2"], fill_value=0).sort_index(),
psser.reindex(["A", "E", "2"], fill_value=0).sort_index(),
)
self.assertRaises(TypeError, lambda: psser.reindex(index=123))
def test_reindex_like(self):
data = [1.0, 2.0, None]
index = pd.Index(["A", "B", "C"], name="index1")
pser = pd.Series(data=data, index=index, name="name1")
psser = ps.from_pandas(pser)
# Reindexing single Index on single Index
data2 = [3.0, None, 4.0]
index2 = pd.Index(["A", "C", "D"], name="index2")
pser2 = pd.Series(data=data2, index=index2, name="name2")
psser2 = ps.from_pandas(pser2)
self.assert_eq(
pser.reindex_like(pser2).sort_index(),
psser.reindex_like(psser2).sort_index(),
)
self.assert_eq(
(pser + 1).reindex_like(pser2).sort_index(),
(psser + 1).reindex_like(psser2).sort_index(),
)
# Reindexing MultiIndex on single Index
index2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["index3", "index4"]
)
pser2 = pd.Series(data=data2, index=index2, name="name2")
psser2 = ps.from_pandas(pser2)
self.assert_eq(
pser.reindex_like(pser2).sort_index(),
psser.reindex_like(psser2).sort_index(),
)
self.assertRaises(TypeError, lambda: psser.reindex_like(index2))
self.assertRaises(AssertionError, lambda: psser2.reindex_like(psser))
# Reindexing MultiIndex on MultiIndex
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
pser = pd.Series(data=data, index=index, name="name1")
psser = ps.from_pandas(pser)
self.assert_eq(
pser.reindex_like(pser2).sort_index(),
psser.reindex_like(psser2).sort_index(),
)
# Reindexing with DataFrame
index2 = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["name3", "name4"]
)
pdf = pd.DataFrame(data=data, index=index2)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pser.reindex_like(pdf).sort_index(),
psser.reindex_like(psdf).sort_index(),
)
def test_fillna(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
self.assert_eq(psser.fillna(0), pser.fillna(0))
self.assert_eq(psser.fillna(np.nan).fillna(0), pser.fillna(np.nan).fillna(0))
psser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
# test the case where the series has no NA/NaN values
psser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(psser, pser)
psser = psdf.x.rename("y")
pser = pdf.x.rename("y")
psser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(psser.head(), pser.head())
pser = pd.Series([1, 2, 3, 4, 5, 6], name="x")
psser = ps.from_pandas(pser)
pser.loc[3] = np.nan
psser.loc[3] = np.nan
self.assert_eq(psser.fillna(0), pser.fillna(0))
self.assert_eq(psser.fillna(method="ffill"), pser.fillna(method="ffill"))
self.assert_eq(psser.fillna(method="bfill"), pser.fillna(method="bfill"))
# inplace fillna on non-nullable column
pdf = pd.DataFrame({"a": [1, 2, None], "b": [1, 2, 3]})
psdf = ps.from_pandas(pdf)
pser = pdf.b
psser = psdf.b
self.assert_eq(psser.fillna(0), pser.fillna(0))
self.assert_eq(psser.fillna(np.nan).fillna(0), pser.fillna(np.nan).fillna(0))
psser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
def test_dropna(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
self.assert_eq(psser.dropna(), pser.dropna())
pser.dropna(inplace=True)
psser.dropna(inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
def test_nunique(self):
pser = pd.Series([1, 2, 1, np.nan])
psser = ps.from_pandas(pser)
# Assert NaNs are dropped by default
nunique_result = psser.nunique()
self.assertEqual(nunique_result, 2)
self.assert_eq(nunique_result, pser.nunique())
# Assert including NaN values
nunique_result = psser.nunique(dropna=False)
self.assertEqual(nunique_result, 3)
self.assert_eq(nunique_result, pser.nunique(dropna=False))
# Assert approximate counts
self.assertEqual(ps.Series(range(100)).nunique(approx=True), 103)
self.assertEqual(ps.Series(range(100)).nunique(approx=True, rsd=0.01), 100)
def test_value_counts(self):
# this also contains tests for Index & MultiIndex
pser = pd.Series(
[1, 2, 1, 3, 3, np.nan, 1, 4, 2, np.nan, 3, np.nan, 3, 1, 3],
index=[1, 2, 1, 3, 3, np.nan, 1, 4, 2, np.nan, 3, np.nan, 3, 1, 3],
name="x",
)
psser = ps.from_pandas(pser)
exp = pser.value_counts()
res = psser.value_counts()
self.assertEqual(res.name, exp.name)
self.assert_eq(res, exp)
self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
psser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
self.assert_eq(
psser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True)
)
self.assert_eq(
psser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True)
)
self.assert_eq(
psser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
)
with self.assertRaisesRegex(
NotImplementedError, "value_counts currently does not support bins"
):
psser.value_counts(bins=3)
pser.name = "index"
psser.name = "index"
self.assert_eq(psser.value_counts(), pser.value_counts())
# Series from DataFrame
pdf = pd.DataFrame({"a": [2, 2, 3], "b": [None, 1, None]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.a.value_counts(normalize=True), pdf.a.value_counts(normalize=True))
self.assert_eq(psdf.a.value_counts(ascending=True), pdf.a.value_counts(ascending=True))
self.assert_eq(
psdf.a.value_counts(normalize=True, dropna=False),
pdf.a.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psdf.a.value_counts(ascending=True, dropna=False),
pdf.a.value_counts(ascending=True, dropna=False),
)
self.assert_eq(
psser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True)
)
self.assert_eq(
psser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True)
)
self.assert_eq(
psser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
)
# Series with NaN index
pser = pd.Series([3, 2, 3, 1, 2, 3], index=[2.0, None, 5.0, 5.0, None, 5.0])
psser = ps.from_pandas(pser)
self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
psser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
self.assert_eq(
psser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True)
)
self.assert_eq(
psser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True)
)
self.assert_eq(
psser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
)
# Series with MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
)
psser = ps.from_pandas(pser)
self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
psser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
# FIXME: MultiIndex.value_counts returns wrong indices.
self.assert_eq(
psser.index.value_counts(normalize=True),
pser.index.value_counts(normalize=True),
almost=True,
)
self.assert_eq(
psser.index.value_counts(ascending=True),
pser.index.value_counts(ascending=True),
almost=True,
)
self.assert_eq(
psser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
almost=True,
)
self.assert_eq(
psser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
almost=True,
)
# Series with MultiIndex some of index has NaN
pser.index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", None), ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
)
psser = ps.from_pandas(pser)
self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
psser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
# FIXME: MultiIndex.value_counts returns wrong indices.
self.assert_eq(
psser.index.value_counts(normalize=True),
pser.index.value_counts(normalize=True),
almost=True,
)
self.assert_eq(
psser.index.value_counts(ascending=True),
pser.index.value_counts(ascending=True),
almost=True,
)
self.assert_eq(
psser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
almost=True,
)
self.assert_eq(
psser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
almost=True,
)
# Series with MultiIndex some of index is NaN.
# This test is only available for pandas >= 0.24.
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
pser.index = pd.MultiIndex.from_tuples(
[("x", "a"), None, ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
)
psser = ps.from_pandas(pser)
self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
psser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
# FIXME: MultiIndex.value_counts returns wrong indices.
self.assert_eq(
psser.index.value_counts(normalize=True),
pser.index.value_counts(normalize=True),
almost=True,
)
self.assert_eq(
psser.index.value_counts(ascending=True),
pser.index.value_counts(ascending=True),
almost=True,
)
self.assert_eq(
psser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
almost=True,
)
self.assert_eq(
psser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
almost=True,
)
def test_nsmallest(self):
sample_lst = [1, 2, 3, 4, np.nan, 6]
pser = pd.Series(sample_lst, name="x")
psser = ps.Series(sample_lst, name="x")
self.assert_eq(psser.nsmallest(n=3), pser.nsmallest(n=3))
self.assert_eq(psser.nsmallest(), pser.nsmallest())
self.assert_eq((psser + 1).nsmallest(), (pser + 1).nsmallest())
def test_nlargest(self):
sample_lst = [1, 2, 3, 4, np.nan, 6]
pser = pd.Series(sample_lst, name="x")
psser = ps.Series(sample_lst, name="x")
self.assert_eq(psser.nlargest(n=3), pser.nlargest(n=3))
self.assert_eq(psser.nlargest(), pser.nlargest())
self.assert_eq((psser + 1).nlargest(), (pser + 1).nlargest())
def test_notnull(self):
pser = pd.Series([1, 2, 3, 4, np.nan, 6], name="x")
psser = ps.from_pandas(pser)
self.assert_eq(psser.notnull(), pser.notnull())
pser = self.pser
psser = self.psser
self.assert_eq(psser.notnull(), pser.notnull())
def test_all(self):
for pser in [
pd.Series([True, True], name="x"),
pd.Series([True, False], name="x"),
pd.Series([0, 1], name="x"),
pd.Series([1, 2, 3], name="x"),
pd.Series([True, True, None], name="x"),
pd.Series([True, False, None], name="x"),
pd.Series([], name="x"),
pd.Series([np.nan], name="x"),
]:
psser = ps.from_pandas(pser)
self.assert_eq(psser.all(), pser.all())
pser = pd.Series([1, 2, 3, 4], name="x")
psser = ps.from_pandas(pser)
self.assert_eq((psser % 2 == 0).all(), (pser % 2 == 0).all())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psser.all(axis=1)
def test_any(self):
for pser in [
pd.Series([False, False], name="x"),
pd.Series([True, False], name="x"),
pd.Series([0, 1], name="x"),
pd.Series([1, 2, 3], name="x"),
pd.Series([True, True, None], name="x"),
pd.Series([True, False, None], name="x"),
pd.Series([], name="x"),
pd.Series([np.nan], name="x"),
]:
psser = ps.from_pandas(pser)
self.assert_eq(psser.any(), pser.any())
pser = pd.Series([1, 2, 3, 4], name="x")
psser = ps.from_pandas(pser)
self.assert_eq((psser % 2 == 0).any(), (pser % 2 == 0).any())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psser.any(axis=1)
def test_reset_index(self):
pdf = pd.DataFrame({"foo": [1, 2, 3, 4]}, index=pd.Index(["a", "b", "c", "d"], name="idx"))
psdf = ps.from_pandas(pdf)
pser = pdf.foo
psser = psdf.foo
self.assert_eq(psser.reset_index(), pser.reset_index())
self.assert_eq(psser.reset_index(name="values"), pser.reset_index(name="values"))
self.assert_eq(psser.reset_index(drop=True), pser.reset_index(drop=True))
# inplace
psser.reset_index(drop=True, inplace=True)
pser.reset_index(drop=True, inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
def test_reset_index_with_default_index_types(self):
pser = pd.Series([1, 2, 3], name="0", index=np.random.rand(3))
psser = ps.from_pandas(pser)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psser.reset_index(), pser.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
# the order might be changed.
self.assert_eq(psser.reset_index().sort_index(), pser.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(
psser.reset_index().to_pandas().reset_index(drop=True), pser.reset_index()
)
def test_index_to_series_reset_index(self):
def check(psser, pser):
self.assert_eq(psser.reset_index(), pser.reset_index())
self.assert_eq(psser.reset_index(drop=True), pser.reset_index(drop=True))
pser.reset_index(drop=True, inplace=True)
psser.reset_index(drop=True, inplace=True)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
check(psdf.index.to_series(), pdf.index.to_series())
check(psdf.index.to_series(name="a"), pdf.index.to_series(name="a"))
check(psdf.index.to_series(name=("x", "a")), pdf.index.to_series(name=("x", "a")))
def test_sort_values(self):
pdf = pd.DataFrame({"x": [1, 2, 3, 4, 5, None, 7]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
self.assert_eq(psser.sort_values(), pser.sort_values())
self.assert_eq(psser.sort_values(ascending=False), pser.sort_values(ascending=False))
self.assert_eq(
psser.sort_values(na_position="first"), pser.sort_values(na_position="first")
)
self.assertRaises(ValueError, lambda: psser.sort_values(na_position="invalid"))
# inplace
# pandas raises an exception when the Series is derived from DataFrame
psser.sort_values(inplace=True)
self.assert_eq(psser, pser.sort_values())
self.assert_eq(psdf, pdf)
pser = pdf.x.copy()
psser = psdf.x.copy()
psser.sort_values(inplace=True)
pser.sort_values(inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
def test_sort_index(self):
pdf = pd.DataFrame({"x": [2, 1, np.nan]}, index=["b", "a", np.nan])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psser.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psser.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psser.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psser.sort_index(), pser.sort_index())
# Assert sorting descending
self.assert_eq(psser.sort_index(ascending=False), pser.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psser.sort_index(na_position="first"), pser.sort_index(na_position="first"))
# Assert sorting inplace
# pandas sorts pdf.x by the index and updates the column only
# when the Series is derived from DataFrame.
psser.sort_index(inplace=True)
self.assert_eq(psser, pser.sort_index())
self.assert_eq(psdf, pdf)
pser = pdf.x.copy()
psser = psdf.x.copy()
psser.sort_index(inplace=True)
pser.sort_index(inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
# Assert multi-indices
pser = pd.Series(range(4), index=[["b", "b", "a", "a"], [1, 0, 1, 0]], name="0")
psser = ps.from_pandas(pser)
self.assert_eq(psser.sort_index(), pser.sort_index())
self.assert_eq(psser.sort_index(level=[1, 0]), pser.sort_index(level=[1, 0]))
self.assert_eq(psser.reset_index().sort_index(), pser.reset_index().sort_index())
def test_to_datetime(self):
pser = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 100)
psser = ps.from_pandas(pser)
self.assert_eq(
pd.to_datetime(pser, infer_datetime_format=True),
ps.to_datetime(psser, infer_datetime_format=True),
)
def test_missing(self):
psser = self.psser
missing_functions = inspect.getmembers(MissingPandasLikeSeries, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psser, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Series.*{}.*is deprecated".format(name)
):
getattr(psser, name)()
missing_properties = inspect.getmembers(
MissingPandasLikeSeries, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psser, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Series.*{}.*is deprecated".format(name)
):
getattr(psser, name)
def test_clip(self):
pser = pd.Series([0, 2, 4], index=np.random.rand(3))
psser = ps.from_pandas(pser)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psser.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psser.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psser.clip(), pser.clip())
# Assert lower only
self.assert_eq(psser.clip(1), pser.clip(1))
# Assert upper only
self.assert_eq(psser.clip(upper=3), pser.clip(upper=3))
# Assert lower and upper
self.assert_eq(psser.clip(1, 3), pser.clip(1, 3))
# Assert behavior on string values
str_psser = ps.Series(["a", "b", "c"])
self.assert_eq(str_psser.clip(1, 3), str_psser)
def test_compare(self):
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
pser = pd.Series([1, 2])
psser = ps.from_pandas(pser)
res_psdf = psser.compare(psser)
self.assertTrue(res_psdf.empty)
self.assert_eq(res_psdf.columns, pd.Index(["self", "other"]))
self.assert_eq(
pser.compare(pser + 1).sort_index(), psser.compare(psser + 1).sort_index()
)
pser = pd.Series([1, 2], index=["x", "y"])
psser = ps.from_pandas(pser)
self.assert_eq(
pser.compare(pser + 1).sort_index(), psser.compare(psser + 1).sort_index()
)
else:
psser = ps.Series([1, 2])
res_psdf = psser.compare(psser)
self.assertTrue(res_psdf.empty)
self.assert_eq(res_psdf.columns, pd.Index(["self", "other"]))
expected = ps.DataFrame([[1, 2], [2, 3]], columns=["self", "other"])
self.assert_eq(expected, psser.compare(psser + 1).sort_index())
psser = ps.Series([1, 2], index=["x", "y"])
expected = ps.DataFrame([[1, 2], [2, 3]], index=["x", "y"], columns=["self", "other"])
self.assert_eq(expected, psser.compare(psser + 1).sort_index())
def test_is_unique(self):
# We can't use pandas' is_unique for comparison. pandas 0.23 ignores None
pser = pd.Series([1, 2, 2, None, None])
psser = ps.from_pandas(pser)
self.assertEqual(False, psser.is_unique)
self.assertEqual(False, (psser + 1).is_unique)
pser = pd.Series([1, None, None])
psser = ps.from_pandas(pser)
self.assertEqual(False, psser.is_unique)
self.assertEqual(False, (psser + 1).is_unique)
pser = pd.Series([1])
psser = ps.from_pandas(pser)
self.assertEqual(pser.is_unique, psser.is_unique)
self.assertEqual((pser + 1).is_unique, (psser + 1).is_unique)
pser = pd.Series([1, 1, 1])
psser = ps.from_pandas(pser)
self.assertEqual(pser.is_unique, psser.is_unique)
self.assertEqual((pser + 1).is_unique, (psser + 1).is_unique)
def test_to_list(self):
self.assert_eq(self.psser.tolist(), self.pser.tolist())
def test_append(self):
pser1 = pd.Series([1, 2, 3], name="0")
pser2 = pd.Series([4, 5, 6], name="0")
pser3 = pd.Series([4, 5, 6], index=[3, 4, 5], name="0")
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
psser3 = ps.from_pandas(pser3)
self.assert_eq(psser1.append(psser2), pser1.append(pser2))
self.assert_eq(psser1.append(psser3), pser1.append(pser3))
self.assert_eq(
psser1.append(psser2, ignore_index=True), pser1.append(pser2, ignore_index=True)
)
psser1.append(psser3, verify_integrity=True)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psser1.append(psser2, verify_integrity=True)
def test_map(self):
pser = pd.Series(["cat", "dog", None, "rabbit"])
psser = ps.from_pandas(pser)
# Currently Koalas doesn't return NaN as pandas does.
self.assert_eq(psser.map({}), pser.map({}).replace({pd.np.nan: None}))
d = defaultdict(lambda: "abc")
self.assertTrue("abc" in repr(psser.map(d)))
self.assert_eq(psser.map(d), pser.map(d))
def tomorrow(date) -> datetime:
return date + timedelta(days=1)
pser = pd.Series([datetime(2019, 10, 24)])
psser = ps.from_pandas(pser)
self.assert_eq(psser.map(tomorrow), pser.map(tomorrow))
def test_add_prefix(self):
pser = pd.Series([1, 2, 3, 4], name="0")
psser = ps.from_pandas(pser)
self.assert_eq(pser.add_prefix("item_"), psser.add_prefix("item_"))
pser = pd.Series(
[1, 2, 3],
name="0",
index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]),
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.add_prefix("item_"), psser.add_prefix("item_"))
def test_add_suffix(self):
pser = pd.Series([1, 2, 3, 4], name="0")
psser = ps.from_pandas(pser)
self.assert_eq(pser.add_suffix("_item"), psser.add_suffix("_item"))
pser = pd.Series(
[1, 2, 3],
name="0",
index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]),
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.add_suffix("_item"), psser.add_suffix("_item"))
def test_cummin(self):
pser = pd.Series([1.0, None, 0.0, 4.0, 9.0])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cummin(), psser.cummin())
self.assert_eq(pser.cummin(skipna=False), psser.cummin(skipna=False))
self.assert_eq(pser.cummin().sum(), psser.cummin().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
psser = ps.from_pandas(pser)
self.assert_eq(pser.cummin(), psser.cummin())
self.assert_eq(pser.cummin(skipna=False), psser.cummin(skipna=False))
def test_cummax(self):
pser = pd.Series([1.0, None, 0.0, 4.0, 9.0])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cummax(), psser.cummax())
self.assert_eq(pser.cummax(skipna=False), psser.cummax(skipna=False))
self.assert_eq(pser.cummax().sum(), psser.cummax().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
psser = ps.from_pandas(pser)
self.assert_eq(pser.cummax(), psser.cummax())
self.assert_eq(pser.cummax(skipna=False), psser.cummax(skipna=False))
def test_cumsum(self):
pser = pd.Series([1.0, None, 0.0, 4.0, 9.0])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumsum(), psser.cumsum())
self.assert_eq(pser.cumsum(skipna=False), psser.cumsum(skipna=False))
self.assert_eq(pser.cumsum().sum(), psser.cumsum().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumsum(), psser.cumsum())
self.assert_eq(pser.cumsum(skipna=False), psser.cumsum(skipna=False))
# bool
pser = pd.Series([True, True, False, True])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumsum().astype(int), psser.cumsum())
self.assert_eq(pser.cumsum(skipna=False).astype(int), psser.cumsum(skipna=False))
def test_cumprod(self):
pser = pd.Series([1.0, None, 1.0, 4.0, 9.0])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumprod(), psser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
self.assert_eq(pser.cumprod().sum(), psser.cumprod().sum())
# with integer type
pser = pd.Series([1, 10, 1, 4, 9])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumprod(), psser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
self.assert_eq(pser.cumprod().sum(), psser.cumprod().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumprod(), psser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
# including zero
pser = pd.Series([1, 2, 0, 3])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumprod(), psser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
# including negative values
pser = pd.Series([1, -1, -2])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumprod(), psser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
# bool
pser = pd.Series([True, True, False, True])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumprod(), psser.cumprod())
self.assert_eq(pser.cumprod(skipna=False).astype(int), psser.cumprod(skipna=False))
def test_median(self):
with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).median(accuracy="a")
def test_rank(self):
pser = pd.Series([1, 2, 3, 1], name="x")
psser = ps.from_pandas(pser)
self.assert_eq(pser.rank(), psser.rank().sort_index())
self.assert_eq(pser.rank().sum(), psser.rank().sum())
self.assert_eq(pser.rank(ascending=False), psser.rank(ascending=False).sort_index())
self.assert_eq(pser.rank(method="min"), psser.rank(method="min").sort_index())
self.assert_eq(pser.rank(method="max"), psser.rank(method="max").sort_index())
self.assert_eq(pser.rank(method="first"), psser.rank(method="first").sort_index())
self.assert_eq(pser.rank(method="dense"), psser.rank(method="dense").sort_index())
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
with self.assertRaisesRegex(ValueError, msg):
psser.rank(method="nothing")
def test_round(self):
pser = pd.Series([0.028208, 0.038683, 0.877076], name="x")
psser = ps.from_pandas(pser)
self.assert_eq(pser.round(2), psser.round(2))
msg = "decimals must be an integer"
with self.assertRaisesRegex(TypeError, msg):
psser.round(1.5)
def test_quantile(self):
pser = pd.Series([])
psser = ps.from_pandas(pser)
self.assert_eq(psser.quantile(0.5), pser.quantile(0.5))
self.assert_eq(psser.quantile([0.25, 0.5, 0.75]), pser.quantile([0.25, 0.5, 0.75]))
with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(accuracy="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q=["a"])
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
ps.Series(["a", "b", "c"]).quantile()
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
ps.Series(["a", "b", "c"]).quantile([0.25, 0.5, 0.75])
def test_idxmax(self):
pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"])
psser = ps.Series(pser)
self.assertEqual(psser.idxmax(), pser.idxmax())
self.assertEqual(psser.idxmax(skipna=False), pser.idxmax(skipna=False))
index = pd.MultiIndex.from_arrays(
[["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second")
)
pser = pd.Series(data=[1, 2, 4, 5], index=index)
psser = ps.Series(pser)
self.assertEqual(psser.idxmax(), pser.idxmax())
self.assertEqual(psser.idxmax(skipna=False), pser.idxmax(skipna=False))
psser = ps.Series([])
with self.assertRaisesRegex(ValueError, "an empty sequence"):
psser.idxmax()
pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
psser = ps.Series(pser)
self.assertEqual(psser.idxmax(), pser.idxmax())
self.assertEqual(repr(psser.idxmax(skipna=False)), repr(pser.idxmax(skipna=False)))
def test_idxmin(self):
pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"])
psser = ps.Series(pser)
self.assertEqual(psser.idxmin(), pser.idxmin())
self.assertEqual(psser.idxmin(skipna=False), pser.idxmin(skipna=False))
index = pd.MultiIndex.from_arrays(
[["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second")
)
pser = pd.Series(data=[1, 2, 4, 5], index=index)
psser = ps.Series(pser)
self.assertEqual(psser.idxmin(), pser.idxmin())
self.assertEqual(psser.idxmin(skipna=False), pser.idxmin(skipna=False))
psser = ps.Series([])
with self.assertRaisesRegex(ValueError, "an empty sequence"):
psser.idxmin()
pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
psser = ps.Series(pser)
self.assertEqual(psser.idxmin(), pser.idxmin())
self.assertEqual(repr(psser.idxmin(skipna=False)), repr(pser.idxmin(skipna=False)))
def test_shift(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.shift(2), pser.shift(2))
self.assert_eq(psser.shift().shift(-1), pser.shift().shift(-1))
self.assert_eq(psser.shift().sum(), pser.shift().sum())
if LooseVersion(pd.__version__) < LooseVersion("0.24.2"):
self.assert_eq(psser.shift(periods=2), pser.shift(periods=2))
else:
self.assert_eq(
psser.shift(periods=2, fill_value=0), pser.shift(periods=2, fill_value=0)
)
with self.assertRaisesRegex(TypeError, "periods should be an int; however"):
psser.shift(periods=1.5)
def test_diff(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.diff(2), pser.diff(2))
self.assert_eq(psser.diff().diff(-1), pser.diff().diff(-1))
self.assert_eq(psser.diff().sum(), pser.diff().sum())
def _test_numeric_astype(self, pser):
psser = ps.Series(pser)
self.assert_eq(psser.astype(int), pser.astype(int))
self.assert_eq(psser.astype(np.int), pser.astype(np.int))
self.assert_eq(psser.astype(np.int8), pser.astype(np.int8))
self.assert_eq(psser.astype(np.int16), pser.astype(np.int16))
self.assert_eq(psser.astype(np.int32), pser.astype(np.int32))
self.assert_eq(psser.astype(np.int64), pser.astype(np.int64))
self.assert_eq(psser.astype(np.byte), pser.astype(np.byte))
self.assert_eq(psser.astype("int"), pser.astype("int"))
self.assert_eq(psser.astype("int8"), pser.astype("int8"))
self.assert_eq(psser.astype("int16"), pser.astype("int16"))
self.assert_eq(psser.astype("int32"), pser.astype("int32"))
self.assert_eq(psser.astype("int64"), pser.astype("int64"))
self.assert_eq(psser.astype("b"), pser.astype("b"))
self.assert_eq(psser.astype("byte"), pser.astype("byte"))
self.assert_eq(psser.astype("i"), pser.astype("i"))
self.assert_eq(psser.astype("long"), pser.astype("long"))
self.assert_eq(psser.astype("short"), pser.astype("short"))
self.assert_eq(psser.astype(np.float), pser.astype(np.float))
self.assert_eq(psser.astype(np.float32), pser.astype(np.float32))
self.assert_eq(psser.astype(np.float64), pser.astype(np.float64))
self.assert_eq(psser.astype("float"), pser.astype("float"))
self.assert_eq(psser.astype("float32"), pser.astype("float32"))
self.assert_eq(psser.astype("float64"), pser.astype("float64"))
self.assert_eq(psser.astype("double"), pser.astype("double"))
self.assert_eq(psser.astype("f"), pser.astype("f"))
self.assert_eq(psser.astype(bool), pser.astype(bool))
self.assert_eq(psser.astype("bool"), pser.astype("bool"))
self.assert_eq(psser.astype("?"), pser.astype("?"))
self.assert_eq(psser.astype(np.unicode_), pser.astype(np.unicode_))
self.assert_eq(psser.astype("str"), pser.astype("str"))
self.assert_eq(psser.astype("U"), pser.astype("U"))
if extension_dtypes_available:
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
self._check_extension(psser.astype("Int8"), pser.astype("Int8"))
self._check_extension(psser.astype("Int16"), pser.astype("Int16"))
self._check_extension(psser.astype("Int32"), pser.astype("Int32"))
self._check_extension(psser.astype("Int64"), pser.astype("Int64"))
self._check_extension(psser.astype(Int8Dtype()), pser.astype(Int8Dtype()))
self._check_extension(psser.astype(Int16Dtype()), pser.astype(Int16Dtype()))
self._check_extension(psser.astype(Int32Dtype()), pser.astype(Int32Dtype()))
self._check_extension(psser.astype(Int64Dtype()), pser.astype(Int64Dtype()))
if extension_object_dtypes_available:
from pandas import StringDtype
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self._check_extension(psser.astype("string"), pser.astype("string"))
self._check_extension(psser.astype(StringDtype()), pser.astype(StringDtype()))
else:
self._check_extension(
psser.astype("string"),
pd.Series(["10", "20", "15", "30", "45"], name="x", dtype="string"),
)
self._check_extension(
psser.astype(StringDtype()),
pd.Series(["10", "20", "15", "30", "45"], name="x", dtype=StringDtype()),
)
if extension_float_dtypes_available:
from pandas import Float32Dtype, Float64Dtype
self._check_extension(psser.astype("Float32"), pser.astype("Float32"))
self._check_extension(psser.astype("Float64"), pser.astype("Float64"))
self._check_extension(psser.astype(Float32Dtype()), pser.astype(Float32Dtype()))
self._check_extension(psser.astype(Float64Dtype()), pser.astype(Float64Dtype()))
def test_astype(self):
psers = [pd.Series([10, 20, 15, 30, 45], name="x")]
if extension_dtypes_available:
psers.append(pd.Series([10, 20, 15, 30, 45], name="x", dtype="Int64"))
if extension_float_dtypes_available:
psers.append(pd.Series([10, 20, 15, 30, 45], name="x", dtype="Float64"))
for pser in psers:
self._test_numeric_astype(pser)
pser = pd.Series([10, 20, 15, 30, 45, None, np.nan], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.astype(bool), pser.astype(bool))
self.assert_eq(psser.astype(str), pser.astype(str))
pser = pd.Series(["hi", "hi ", " ", " \t", "", None], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.astype(bool), pser.astype(bool))
if LooseVersion("1.1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.1.4"):
# a pandas bug: https://github.com/databricks/koalas/pull/1818#issuecomment-703961980
self.assert_eq(psser.astype(str).tolist(), ["hi", "hi ", " ", " \t", "", "None"])
else:
self.assert_eq(psser.astype(str), pser.astype(str))
self.assert_eq(psser.str.strip().astype(bool), pser.str.strip().astype(bool))
if extension_object_dtypes_available:
from pandas import StringDtype
self._check_extension(psser.astype("string"), pser.astype("string"))
self._check_extension(psser.astype(StringDtype()), pser.astype(StringDtype()))
pser = pd.Series([True, False, None], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.astype(bool), pser.astype(bool))
self.assert_eq(psser.astype(str), pser.astype(str))
if extension_object_dtypes_available:
from pandas import BooleanDtype, StringDtype
self._check_extension(psser.astype("boolean"), pser.astype("boolean"))
self._check_extension(psser.astype(BooleanDtype()), pser.astype(BooleanDtype()))
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self._check_extension(psser.astype("string"), pser.astype("string"))
self._check_extension(psser.astype(StringDtype()), pser.astype(StringDtype()))
else:
self._check_extension(
psser.astype("string"),
pd.Series(["True", "False", None], name="x", dtype="string"),
)
self._check_extension(
psser.astype(StringDtype()),
pd.Series(["True", "False", None], name="x", dtype=StringDtype()),
)
pser = pd.Series(["2020-10-27 00:00:01", None], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.astype(np.datetime64), pser.astype(np.datetime64))
self.assert_eq(psser.astype("datetime64[ns]"), pser.astype("datetime64[ns]"))
self.assert_eq(psser.astype("M"), pser.astype("M"))
self.assert_eq(psser.astype("M").astype(str), pser.astype("M").astype(str))
        # The test below is commented out because pandas returns `NaT` or `nan` randomly
# self.assert_eq(
# psser.astype("M").dt.date.astype(str), pser.astype("M").dt.date.astype(str)
# )
if extension_object_dtypes_available:
from pandas import StringDtype
self._check_extension(
psser.astype("M").astype("string"), pser.astype("M").astype("string")
)
self._check_extension(
psser.astype("M").astype(StringDtype()), pser.astype("M").astype(StringDtype())
)
with self.assertRaisesRegex(TypeError, "not understood"):
psser.astype("int63")
def test_aggregate(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
psser = ps.Series(pser)
msg = "func must be a string or list of strings"
with self.assertRaisesRegex(TypeError, msg):
psser.aggregate({"x": ["min", "max"]})
msg = (
"If the given function is a list, it " "should only contains function names as strings."
)
with self.assertRaisesRegex(ValueError, msg):
psser.aggregate(["min", max])
def test_drop(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.drop(1), pser.drop(1))
self.assert_eq(psser.drop([1, 4]), pser.drop([1, 4]))
msg = "Need to specify at least one of 'labels' or 'index'"
with self.assertRaisesRegex(ValueError, msg):
psser.drop()
self.assertRaises(KeyError, lambda: psser.drop((0, 1)))
# For MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.drop("lama"), pser.drop("lama"))
self.assert_eq(psser.drop(labels="weight", level=1), pser.drop(labels="weight", level=1))
self.assert_eq(psser.drop(("lama", "weight")), pser.drop(("lama", "weight")))
self.assert_eq(
psser.drop([("lama", "speed"), ("falcon", "weight")]),
pser.drop([("lama", "speed"), ("falcon", "weight")]),
)
self.assert_eq(psser.drop({"lama": "speed"}), pser.drop({"lama": "speed"}))
msg = "'level' should be less than the number of indexes"
with self.assertRaisesRegex(ValueError, msg):
psser.drop(labels="weight", level=2)
msg = (
"If the given index is a list, it "
"should only contains names as all tuples or all non tuples "
"that contain index names"
)
with self.assertRaisesRegex(ValueError, msg):
psser.drop(["lama", ["cow", "falcon"]])
msg = "Cannot specify both 'labels' and 'index'"
with self.assertRaisesRegex(ValueError, msg):
psser.drop("lama", index="cow")
msg = r"'Key length \(2\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psser.drop(("lama", "speed", "x"))
def test_pop(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pdf = pd.DataFrame({"x": [45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3]}, index=midx)
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
self.assert_eq(psser.pop(("lama", "speed")), pser.pop(("lama", "speed")))
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
msg = r"'Key length \(3\) exceeds index depth \(2\)'"
with self.assertRaisesRegex(KeyError, msg):
psser.pop(("lama", "speed", "x"))
def test_replace(self):
pser = pd.Series([10, 20, 15, 30, np.nan], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.replace(), pser.replace())
self.assert_eq(psser.replace({}), pser.replace({}))
self.assert_eq(psser.replace(np.nan, 45), pser.replace(np.nan, 45))
self.assert_eq(psser.replace([10, 15], 45), pser.replace([10, 15], 45))
self.assert_eq(psser.replace((10, 15), 45), pser.replace((10, 15), 45))
self.assert_eq(psser.replace([10, 15], [45, 50]), pser.replace([10, 15], [45, 50]))
self.assert_eq(psser.replace((10, 15), (45, 50)), pser.replace((10, 15), (45, 50)))
msg = "'to_replace' should be one of str, list, tuple, dict, int, float"
with self.assertRaisesRegex(TypeError, msg):
psser.replace(ps.range(5))
msg = "Replacement lists must match in length. Expecting 3 got 2"
with self.assertRaisesRegex(ValueError, msg):
psser.replace([10, 20, 30], [1, 2])
msg = "replace currently not support for regex"
with self.assertRaisesRegex(NotImplementedError, msg):
psser.replace(r"^1.$", regex=True)
def test_xs(self):
midx = pd.MultiIndex(
[["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.xs(("a", "lama", "speed")), pser.xs(("a", "lama", "speed")))
def test_duplicates(self):
psers = {
"test on texts": pd.Series(
["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal"
),
"test on numbers": pd.Series([1, 1, 2, 4, 3]),
}
keeps = ["first", "last", False]
for (msg, pser), keep in product(psers.items(), keeps):
with self.subTest(msg, keep=keep):
psser = ps.Series(pser)
self.assert_eq(
pser.drop_duplicates(keep=keep).sort_values(),
psser.drop_duplicates(keep=keep).sort_values(),
)
def test_update(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
psser = ps.Series(pser)
msg = "'other' must be a Series"
with self.assertRaisesRegex(TypeError, msg):
psser.update(10)
def test_where(self):
pser1 = pd.Series([0, 1, 2, 3, 4])
psser1 = ps.from_pandas(pser1)
self.assert_eq(pser1.where(pser1 > 3), psser1.where(psser1 > 3).sort_index())
def test_mask(self):
pser1 = pd.Series([0, 1, 2, 3, 4])
psser1 = ps.from_pandas(pser1)
self.assert_eq(pser1.mask(pser1 > 3), psser1.mask(psser1 > 3).sort_index())
def test_truncate(self):
pser1 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7])
psser1 = ps.Series(pser1)
pser2 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[7, 6, 5, 4, 3, 2, 1])
psser2 = ps.Series(pser2)
self.assert_eq(psser1.truncate(), pser1.truncate())
self.assert_eq(psser1.truncate(before=2), pser1.truncate(before=2))
self.assert_eq(psser1.truncate(after=5), pser1.truncate(after=5))
self.assert_eq(psser1.truncate(copy=False), pser1.truncate(copy=False))
self.assert_eq(psser1.truncate(2, 5, copy=False), pser1.truncate(2, 5, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psser2.truncate(4, 6), pser2.truncate(4, 6))
self.assert_eq(psser2.truncate(4, 6, copy=False), pser2.truncate(4, 6, copy=False))
else:
expected_psser = ps.Series([20, 30, 40], index=[6, 5, 4])
self.assert_eq(psser2.truncate(4, 6), expected_psser)
self.assert_eq(psser2.truncate(4, 6, copy=False), expected_psser)
psser = ps.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 3, 2, 1])
msg = "truncate requires a sorted index"
with self.assertRaisesRegex(ValueError, msg):
psser.truncate()
psser = ps.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7])
msg = "Truncate: 2 must be after 5"
with self.assertRaisesRegex(ValueError, msg):
psser.truncate(5, 2)
def test_getitem(self):
pser = pd.Series([10, 20, 15, 30, 45], ["A", "A", "B", "C", "D"])
psser = ps.Series(pser)
self.assert_eq(psser["A"], pser["A"])
self.assert_eq(psser["B"], pser["B"])
self.assert_eq(psser[psser > 15], pser[pser > 15])
# for MultiIndex
midx = pd.MultiIndex(
[["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], name="0", index=midx)
psser = ps.Series(pser)
self.assert_eq(psser["a"], pser["a"])
self.assert_eq(psser["a", "lama"], pser["a", "lama"])
self.assert_eq(psser[psser > 1.5], pser[pser > 1.5])
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psser[("a", "lama", "speed", "x")]
def test_keys(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.keys(), pser.keys())
def test_index(self):
# to check setting name of Index properly.
idx = pd.Index([1, 2, 3, 4, 5, 6, 7, 8, 9])
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=idx)
psser = ps.from_pandas(pser)
psser.name = "koalas"
pser.name = "koalas"
self.assert_eq(psser.index.name, pser.index.name)
        # to check setting names of MultiIndex properly.
psser.names = ["hello", "koalas"]
pser.names = ["hello", "koalas"]
self.assert_eq(psser.index.names, pser.index.names)
def test_pct_change(self):
pser = pd.Series([90, 91, 85], index=[2, 4, 1])
psser = ps.from_pandas(pser)
self.assert_eq(psser.pct_change(), pser.pct_change(), check_exact=False)
self.assert_eq(psser.pct_change().sum(), pser.pct_change().sum(), almost=True)
self.assert_eq(psser.pct_change(periods=2), pser.pct_change(periods=2), check_exact=False)
self.assert_eq(psser.pct_change(periods=-1), pser.pct_change(periods=-1), check_exact=False)
self.assert_eq(psser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000))
self.assert_eq(psser.pct_change(periods=100000000), pser.pct_change(periods=100000000))
# for MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.pct_change(), pser.pct_change(), check_exact=False)
self.assert_eq(psser.pct_change().sum(), pser.pct_change().sum(), almost=True)
self.assert_eq(psser.pct_change(periods=2), pser.pct_change(periods=2), check_exact=False)
self.assert_eq(psser.pct_change(periods=-1), pser.pct_change(periods=-1), check_exact=False)
self.assert_eq(psser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000))
self.assert_eq(psser.pct_change(periods=100000000), pser.pct_change(periods=100000000))
def test_axes(self):
pser = pd.Series([90, 91, 85], index=[2, 4, 1])
psser = ps.from_pandas(pser)
self.assert_eq(psser.axes, pser.axes)
# for MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.axes, pser.axes)
def test_udt(self):
sparse_values = {0: 0.1, 1: 1.1}
sparse_vector = SparseVector(len(sparse_values), sparse_values)
pser = pd.Series([sparse_vector])
psser = ps.from_pandas(pser)
self.assert_eq(psser, pser)
def test_repeat(self):
pser = pd.Series(["a", "b", "c"], name="0", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(psser.repeat(3).sort_index(), pser.repeat(3).sort_index())
self.assert_eq(psser.repeat(0).sort_index(), pser.repeat(0).sort_index())
self.assertRaises(ValueError, lambda: psser.repeat(-1))
self.assertRaises(TypeError, lambda: psser.repeat("abc"))
pdf = pd.DataFrame({"a": ["a", "b", "c"], "rep": [10, 20, 30]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.a.repeat(psdf.rep).sort_index(), pdf.a.repeat(pdf.rep).sort_index())
def test_take(self):
pser = pd.Series([100, 200, 300, 400, 500], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(psser.take([0, 2, 4]).sort_values(), pser.take([0, 2, 4]).sort_values())
self.assert_eq(
psser.take(range(0, 5, 2)).sort_values(), pser.take(range(0, 5, 2)).sort_values()
)
self.assert_eq(psser.take([-4, -2, 0]).sort_values(), pser.take([-4, -2, 0]).sort_values())
self.assert_eq(
psser.take(range(-2, 1, 2)).sort_values(), pser.take(range(-2, 1, 2)).sort_values()
)
# Checking the type of indices.
self.assertRaises(TypeError, lambda: psser.take(1))
self.assertRaises(TypeError, lambda: psser.take("1"))
self.assertRaises(TypeError, lambda: psser.take({1, 2}))
self.assertRaises(TypeError, lambda: psser.take({1: None, 2: None}))
def test_divmod(self):
pser = pd.Series([100, None, 300, None, 500], name="Koalas")
psser = ps.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
kdiv, kmod = psser.divmod(-100)
pdiv, pmod = pser.divmod(-100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
kdiv, kmod = psser.divmod(100)
pdiv, pmod = pser.divmod(100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
kdiv, kmod = psser.divmod(-100)
pdiv, pmod = pser.floordiv(-100), pser.mod(-100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
kdiv, kmod = psser.divmod(100)
pdiv, pmod = pser.floordiv(100), pser.mod(100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
def test_rdivmod(self):
pser = pd.Series([100, None, 300, None, 500])
psser = ps.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
krdiv, krmod = psser.rdivmod(-100)
prdiv, prmod = pser.rdivmod(-100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
krdiv, krmod = psser.rdivmod(100)
prdiv, prmod = pser.rdivmod(100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
krdiv, krmod = psser.rdivmod(-100)
prdiv, prmod = pser.rfloordiv(-100), pser.rmod(-100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
krdiv, krmod = psser.rdivmod(100)
prdiv, prmod = pser.rfloordiv(100), pser.rmod(100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
def test_mod(self):
pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(psser.mod(-150), pser.mod(-150))
self.assert_eq(psser.mod(0), pser.mod(0))
self.assert_eq(psser.mod(150), pser.mod(150))
pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.a.mod(psdf.b), pdf.a.mod(pdf.b))
def test_mode(self):
pser = pd.Series([0, 0, 1, 1, 1, np.nan, np.nan, np.nan])
psser = ps.from_pandas(pser)
self.assert_eq(psser.mode(), pser.mode())
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `dropna` argument is added in pandas 0.24.
self.assert_eq(
psser.mode(dropna=False).sort_values().reset_index(drop=True),
pser.mode(dropna=False).sort_values().reset_index(drop=True),
)
pser.name = "x"
psser = ps.from_pandas(pser)
self.assert_eq(psser.mode(), pser.mode())
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `dropna` argument is added in pandas 0.24.
self.assert_eq(
psser.mode(dropna=False).sort_values().reset_index(drop=True),
pser.mode(dropna=False).sort_values().reset_index(drop=True),
)
def test_rmod(self):
pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(psser.rmod(-150), pser.rmod(-150))
self.assert_eq(psser.rmod(0), pser.rmod(0))
self.assert_eq(psser.rmod(150), pser.rmod(150))
pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.a.rmod(psdf.b), pdf.a.rmod(pdf.b))
def test_asof(self):
pser = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(psser.asof(20), pser.asof(20))
self.assert_eq(psser.asof([5, 20]).sort_index(), pser.asof([5, 20]).sort_index())
self.assert_eq(psser.asof(100), pser.asof(100))
self.assert_eq(repr(psser.asof(-100)), repr(pser.asof(-100)))
self.assert_eq(psser.asof([-100, 100]).sort_index(), pser.asof([-100, 100]).sort_index())
# where cannot be an Index, Series or a DataFrame
self.assertRaises(ValueError, lambda: psser.asof(ps.Index([-100, 100])))
self.assertRaises(ValueError, lambda: psser.asof(ps.Series([-100, 100])))
self.assertRaises(ValueError, lambda: psser.asof(ps.DataFrame({"A": [1, 2, 3]})))
# asof is not supported for a MultiIndex
pser.index = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c"), ("y", "d")])
psser = ps.from_pandas(pser)
self.assertRaises(ValueError, lambda: psser.asof(20))
        # asof requires a sorted index (more precisely, a monotonically increasing one)
psser = ps.Series([1, 2, np.nan, 4], index=[10, 30, 20, 40], name="Koalas")
self.assertRaises(ValueError, lambda: psser.asof(20))
psser = ps.Series([1, 2, np.nan, 4], index=[40, 30, 20, 10], name="Koalas")
self.assertRaises(ValueError, lambda: psser.asof(20))
pidx = pd.DatetimeIndex(["2013-12-31", "2014-01-02", "2014-01-03"])
pser = pd.Series([1, 2, np.nan], index=pidx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.asof("2014-01-01"), pser.asof("2014-01-01"))
self.assert_eq(psser.asof("2014-01-02"), pser.asof("2014-01-02"))
self.assert_eq(repr(psser.asof("1999-01-02")), repr(pser.asof("1999-01-02")))
def test_squeeze(self):
# Single value
pser = pd.Series([90])
psser = ps.from_pandas(pser)
self.assert_eq(psser.squeeze(), pser.squeeze())
# Single value with MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "b", "c")])
pser = pd.Series([90], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.squeeze(), pser.squeeze())
# Multiple values
pser = pd.Series([90, 91, 85])
psser = ps.from_pandas(pser)
self.assert_eq(psser.squeeze(), pser.squeeze())
# Multiple values with MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
pser = pd.Series([90, 91, 85], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.squeeze(), pser.squeeze())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pser = pd.Series(["a", "b", "c", "d"], index=pidx)
psser = ps.from_pandas(pser)
self.assert_eq(pser.swaplevel(), psser.swaplevel())
self.assert_eq(pser.swaplevel(0, 1), psser.swaplevel(0, 1))
self.assert_eq(pser.swaplevel(1, 1), psser.swaplevel(1, 1))
self.assert_eq(pser.swaplevel("number", "color"), psser.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pser = pd.Series(["a", "b", "c", "d"], index=pidx)
psser = ps.from_pandas(pser)
self.assert_eq(pser.swaplevel(), psser.swaplevel())
self.assert_eq(pser.swaplevel(0, 1), psser.swaplevel(0, 1))
self.assert_eq(pser.swaplevel(0, 2), psser.swaplevel(0, 2))
self.assert_eq(pser.swaplevel(1, 2), psser.swaplevel(1, 2))
self.assert_eq(pser.swaplevel(1, 1), psser.swaplevel(1, 1))
self.assert_eq(pser.swaplevel(-1, -2), psser.swaplevel(-1, -2))
self.assert_eq(pser.swaplevel("number", "color"), psser.swaplevel("number", "color"))
self.assert_eq(pser.swaplevel("number", "size"), psser.swaplevel("number", "size"))
self.assert_eq(pser.swaplevel("color", "size"), psser.swaplevel("color", "size"))
# Error conditions
self.assertRaises(AssertionError, lambda: ps.Series([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psser.swaplevel(0, 9))
self.assertRaises(KeyError, lambda: psser.swaplevel("not_number", "color"))
self.assertRaises(AssertionError, lambda: psser.swaplevel(copy=False))
def test_swapaxes(self):
pser = pd.Series([1, 2, 3], index=["x", "y", "z"], name="ser")
psser = ps.from_pandas(pser)
self.assert_eq(psser.swapaxes(0, 0), pser.swapaxes(0, 0))
self.assert_eq(psser.swapaxes("index", "index"), pser.swapaxes("index", "index"))
self.assert_eq((psser + 1).swapaxes(0, 0), (pser + 1).swapaxes(0, 0))
self.assertRaises(AssertionError, lambda: psser.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psser.swapaxes(0, 1))
self.assertRaises(ValueError, lambda: psser.swapaxes("index", "columns"))
def test_div_zero_and_nan(self):
pser = pd.Series([100, None, -300, None, 500, -700, np.inf, -np.inf], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(pser.div(0), psser.div(0))
self.assert_eq(pser.truediv(0), psser.truediv(0))
self.assert_eq(pser / 0, psser / 0)
self.assert_eq(pser.div(np.nan), psser.div(np.nan))
self.assert_eq(pser.truediv(np.nan), psser.truediv(np.nan))
self.assert_eq(pser / np.nan, psser / np.nan)
        # floordiv behaves differently in pandas >= 1.0.0 when dividing by 0
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(pser.floordiv(0), psser.floordiv(0))
self.assert_eq(pser // 0, psser // 0)
else:
result = pd.Series(
[np.inf, np.nan, -np.inf, np.nan, np.inf, -np.inf, np.inf, -np.inf], name="Koalas"
)
self.assert_eq(psser.floordiv(0), result)
self.assert_eq(psser // 0, result)
self.assert_eq(pser.floordiv(np.nan), psser.floordiv(np.nan))
def test_mad(self):
pser = pd.Series([1, 2, 3, 4], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(pser.mad(), psser.mad())
pser = pd.Series([None, -2, 5, 10, 50, np.nan, -20], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(pser.mad(), psser.mad())
pmidx = pd.MultiIndex.from_tuples(
[("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")]
)
pser = pd.Series([1, 2, 3, 4, 5], name="Koalas")
pser.index = pmidx
psser = ps.from_pandas(pser)
self.assert_eq(pser.mad(), psser.mad())
pmidx = pd.MultiIndex.from_tuples(
[("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")]
)
pser = pd.Series([None, -2, 5, 50, np.nan], name="Koalas")
pser.index = pmidx
psser = ps.from_pandas(pser)
self.assert_eq(pser.mad(), psser.mad())
def test_to_frame(self):
pser = pd.Series(["a", "b", "c"])
psser = ps.from_pandas(pser)
self.assert_eq(pser.to_frame(name="a"), psser.to_frame(name="a"))
# for MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
pser = pd.Series(["a", "b", "c"], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(pser.to_frame(name="a"), psser.to_frame(name="a"))
def test_shape(self):
pser = pd.Series(["a", "b", "c"])
psser = ps.from_pandas(pser)
self.assert_eq(pser.shape, psser.shape)
# for MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
pser = pd.Series(["a", "b", "c"], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(pser.shape, psser.shape)
@unittest.skipIf(not have_tabulate, tabulate_requirement_message)
def test_to_markdown(self):
pser = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")
psser = ps.from_pandas(pser)
        # `to_markdown()` is only supported in pandas >= 1.0.0, where it was first added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assertRaises(NotImplementedError, lambda: psser.to_markdown())
else:
self.assert_eq(pser.to_markdown(), psser.to_markdown())
def test_unstack(self):
pser = pd.Series(
[10, -2, 4, 7],
index=pd.MultiIndex.from_tuples(
[("one", "a", "z"), ("one", "b", "x"), ("two", "a", "c"), ("two", "b", "v")],
names=["A", "B", "C"],
),
)
psser = ps.from_pandas(pser)
levels = [-3, -2, -1, 0, 1, 2]
for level in levels:
pandas_result = pser.unstack(level=level)
pandas_on_spark_result = psser.unstack(level=level).sort_index()
self.assert_eq(pandas_result, pandas_on_spark_result)
self.assert_eq(pandas_result.index.names, pandas_on_spark_result.index.names)
self.assert_eq(pandas_result.columns.names, pandas_on_spark_result.columns.names)
# non-numeric datatypes
pser = pd.Series(
list("abcd"), index=pd.MultiIndex.from_product([["one", "two"], ["a", "b"]])
)
psser = ps.from_pandas(pser)
levels = [-2, -1, 0, 1]
for level in levels:
pandas_result = pser.unstack(level=level)
pandas_on_spark_result = psser.unstack(level=level).sort_index()
self.assert_eq(pandas_result, pandas_on_spark_result)
self.assert_eq(pandas_result.index.names, pandas_on_spark_result.index.names)
self.assert_eq(pandas_result.columns.names, pandas_on_spark_result.columns.names)
# Exceeding the range of level
self.assertRaises(IndexError, lambda: psser.unstack(level=3))
self.assertRaises(IndexError, lambda: psser.unstack(level=-4))
# Only support for MultiIndex
psser = ps.Series([10, -2, 4, 7])
self.assertRaises(ValueError, lambda: psser.unstack())
def test_item(self):
psser = ps.Series([10, 20])
self.assertRaises(ValueError, lambda: psser.item())
def test_filter(self):
pser = pd.Series([0, 1, 2], index=["one", "two", "three"])
psser = ps.from_pandas(pser)
self.assert_eq(pser.filter(items=["one", "three"]), psser.filter(items=["one", "three"]))
self.assert_eq(pser.filter(regex="e$"), psser.filter(regex="e$"))
self.assert_eq(pser.filter(like="hre"), psser.filter(like="hre"))
with self.assertRaisesRegex(ValueError, "Series does not support columns axis."):
psser.filter(like="hre", axis=1)
# for MultiIndex
midx = pd.MultiIndex.from_tuples([("one", "x"), ("two", "y"), ("three", "z")])
pser = pd.Series([0, 1, 2], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(
pser.filter(items=[("one", "x"), ("three", "z")]),
psser.filter(items=[("one", "x"), ("three", "z")]),
)
with self.assertRaisesRegex(TypeError, "Unsupported type list"):
psser.filter(items=[["one", "x"], ("three", "z")])
with self.assertRaisesRegex(ValueError, "The item should not be empty."):
psser.filter(items=[(), ("three", "z")])
def test_abs(self):
pser = pd.Series([-2, -1, 0, 1])
psser = ps.from_pandas(pser)
self.assert_eq(abs(psser), abs(pser))
self.assert_eq(np.abs(psser), np.abs(pser))
def test_bfill(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
self.assert_eq(psser.bfill(), pser.bfill())
self.assert_eq(psser.bfill()[0], pser.bfill()[0])
psser.bfill(inplace=True)
pser.bfill(inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psser[0], pser[0])
self.assert_eq(psdf, pdf)
def test_ffill(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
self.assert_eq(psser.ffill(), pser.ffill())
self.assert_eq(psser.ffill()[4], pser.ffill()[4])
psser.ffill(inplace=True)
pser.ffill(inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psser[4], pser[4])
self.assert_eq(psdf, pdf)
def test_iteritems(self):
pser = pd.Series(["A", "B", "C"])
psser = ps.from_pandas(pser)
for (p_name, p_items), (k_name, k_items) in zip(pser.iteritems(), psser.iteritems()):
self.assert_eq(p_name, k_name)
self.assert_eq(p_items, k_items)
def test_droplevel(self):
# droplevel is new in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
pser = pd.Series(
[1, 2, 3],
index=pd.MultiIndex.from_tuples(
[("x", "a", "q"), ("x", "b", "w"), ("y", "c", "e")],
names=["level_1", "level_2", "level_3"],
),
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.droplevel(0), psser.droplevel(0))
self.assert_eq(pser.droplevel("level_1"), psser.droplevel("level_1"))
self.assert_eq(pser.droplevel(-1), psser.droplevel(-1))
self.assert_eq(pser.droplevel([0]), psser.droplevel([0]))
self.assert_eq(pser.droplevel(["level_1"]), psser.droplevel(["level_1"]))
self.assert_eq(pser.droplevel((0,)), psser.droplevel((0,)))
self.assert_eq(pser.droplevel(("level_1",)), psser.droplevel(("level_1",)))
self.assert_eq(pser.droplevel([0, 2]), psser.droplevel([0, 2]))
self.assert_eq(
pser.droplevel(["level_1", "level_3"]), psser.droplevel(["level_1", "level_3"])
)
self.assert_eq(pser.droplevel((1, 2)), psser.droplevel((1, 2)))
self.assert_eq(
pser.droplevel(("level_2", "level_3")), psser.droplevel(("level_2", "level_3"))
)
with self.assertRaisesRegex(KeyError, "Level {0, 1, 2} not found"):
psser.droplevel({0, 1, 2})
with self.assertRaisesRegex(KeyError, "Level level_100 not found"):
psser.droplevel(["level_1", "level_100"])
with self.assertRaisesRegex(
IndexError, "Too many levels: Index has only 3 levels, not 11"
):
psser.droplevel(10)
with self.assertRaisesRegex(
IndexError,
"Too many levels: Index has only 3 levels, -10 is not a valid level number",
):
psser.droplevel(-10)
with self.assertRaisesRegex(
ValueError,
"Cannot remove 3 levels from an index with 3 levels: "
"at least one level must be left.",
):
psser.droplevel([0, 1, 2])
with self.assertRaisesRegex(
ValueError,
"Cannot remove 5 levels from an index with 3 levels: "
"at least one level must be left.",
):
psser.droplevel([1, 1, 1, 1, 1])
# Tupled names
pser.index.names = [("a", "1"), ("b", "2"), ("c", "3")]
psser = ps.from_pandas(pser)
self.assert_eq(
pser.droplevel([("a", "1"), ("c", "3")]), psser.droplevel([("a", "1"), ("c", "3")])
)
def test_dot(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
psdf = ps.from_pandas(pdf)
self.assert_eq((psdf["b"] * 10).dot(psdf["a"]), (pdf["b"] * 10).dot(pdf["a"]))
self.assert_eq((psdf["b"] * 10).dot(psdf), (pdf["b"] * 10).dot(pdf))
self.assert_eq((psdf["b"] * 10).dot(psdf + 1), (pdf["b"] * 10).dot(pdf + 1))
def test_tail(self):
pser = pd.Series(range(1000), name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(pser.tail(), psser.tail())
self.assert_eq(pser.tail(10), psser.tail(10))
self.assert_eq(pser.tail(-990), psser.tail(-990))
self.assert_eq(pser.tail(0), psser.tail(0))
self.assert_eq(pser.tail(1001), psser.tail(1001))
self.assert_eq(pser.tail(-1001), psser.tail(-1001))
self.assert_eq((pser + 1).tail(), (psser + 1).tail())
self.assert_eq((pser + 1).tail(10), (psser + 1).tail(10))
self.assert_eq((pser + 1).tail(-990), (psser + 1).tail(-990))
self.assert_eq((pser + 1).tail(0), (psser + 1).tail(0))
self.assert_eq((pser + 1).tail(1001), (psser + 1).tail(1001))
self.assert_eq((pser + 1).tail(-1001), (psser + 1).tail(-1001))
with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"):
psser.tail("10")
def test_product(self):
pser = pd.Series([10, 20, 30, 40, 50])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod())
# Containing NA values
pser = pd.Series([10, np.nan, 30, np.nan, 50])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod(), almost=True)
# All-NA values
pser = pd.Series([np.nan, np.nan, np.nan])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod())
# Empty Series
pser = pd.Series([])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod())
# Boolean Series
pser = pd.Series([True, True, True])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod())
pser = pd.Series([False, False, False])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod())
pser = pd.Series([True, False, True])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod())
# With `min_count` parameter
pser = pd.Series([10, 20, 30, 40, 50])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(min_count=5), psser.prod(min_count=5))
self.assert_eq(pser.prod(min_count=6), psser.prod(min_count=6))
pser = pd.Series([10, np.nan, 30, np.nan, 50])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(min_count=3), psser.prod(min_count=3), almost=True)
self.assert_eq(pser.prod(min_count=4), psser.prod(min_count=4))
pser = pd.Series([np.nan, np.nan, np.nan])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(min_count=1), psser.prod(min_count=1))
pser = pd.Series([])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(min_count=1), psser.prod(min_count=1))
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
ps.Series(["a", "b", "c"]).prod()
with self.assertRaisesRegex(
TypeError, "Could not convert datetime64\\[ns\\] \\(timestamp\\) to numeric"
):
ps.Series([pd.Timestamp("2016-01-01") for _ in range(3)]).prod()
def test_hasnans(self):
# BooleanType
pser = pd.Series([True, False, True, True])
psser = ps.from_pandas(pser)
self.assert_eq(pser.hasnans, psser.hasnans)
pser = pd.Series([True, False, np.nan, True])
psser = ps.from_pandas(pser)
self.assert_eq(pser.hasnans, psser.hasnans)
# TimestampType
pser = pd.Series([pd.Timestamp("2020-07-30") for _ in range(3)])
psser = ps.from_pandas(pser)
self.assert_eq(pser.hasnans, psser.hasnans)
pser = pd.Series([pd.Timestamp("2020-07-30"), np.nan, pd.Timestamp("2020-07-30")])
psser = ps.from_pandas(pser)
self.assert_eq(pser.hasnans, psser.hasnans)
def test_last_valid_index(self):
pser = pd.Series([250, 1.5, 320, 1, 0.3, None, None, None, None])
psser = ps.from_pandas(pser)
self.assert_eq(pser.last_valid_index(), psser.last_valid_index())
# MultiIndex columns
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser.index = midx
psser = ps.from_pandas(pser)
self.assert_eq(pser.last_valid_index(), psser.last_valid_index())
# Empty Series
pser = pd.Series([])
psser = ps.from_pandas(pser)
self.assert_eq(pser.last_valid_index(), psser.last_valid_index())
def test_first_valid_index(self):
# Empty Series
pser = pd.Series([])
psser = ps.from_pandas(pser)
self.assert_eq(pser.first_valid_index(), psser.first_valid_index())
def test_factorize(self):
pser = pd.Series(["a", "b", "a", "b"])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True)
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pser = pd.Series([5, 1, 5, 1])
psser = ps.from_pandas(pser)
pcodes, puniques = (pser + 1).factorize(sort=True)
kcodes, kuniques = (psser + 1).factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pser = pd.Series(["a", "b", "a", "b"], name="ser", index=["w", "x", "y", "z"])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True)
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pser = pd.Series(
["a", "b", "a", "b"], index=pd.MultiIndex.from_arrays([[4, 3, 2, 1], [1, 2, 3, 4]])
)
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True)
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
#
# Deals with None and np.nan
#
pser = pd.Series(["a", "b", "a", np.nan])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True)
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pser = pd.Series([1, None, 3, 2, 1])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True)
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pser = pd.Series(["a", None, "a"])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True)
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pser = pd.Series([None, np.nan])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize()
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes, kcodes.to_list())
# pandas: Float64Index([], dtype='float64')
self.assert_eq(pd.Index([]), kuniques)
pser = pd.Series([np.nan, np.nan])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize()
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes, kcodes.to_list())
# pandas: Float64Index([], dtype='float64')
self.assert_eq(pd.Index([]), kuniques)
#
# Deals with na_sentinel
#
        # pandas >= 1.1.2 supports na_sentinel=None
        # pandas >= 0.24 supports na_sentinel values other than -1
#
pd_below_1_1_2 = LooseVersion(pd.__version__) < LooseVersion("1.1.2")
pd_below_0_24 = LooseVersion(pd.__version__) < LooseVersion("0.24")
pser = pd.Series(["a", "b", "a", np.nan, None])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True, na_sentinel=-2)
kcodes, kuniques = psser.factorize(na_sentinel=-2)
self.assert_eq([0, 1, 0, -2, -2] if pd_below_0_24 else pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pcodes, puniques = pser.factorize(sort=True, na_sentinel=2)
kcodes, kuniques = psser.factorize(na_sentinel=2)
self.assert_eq([0, 1, 0, 2, 2] if pd_below_0_24 else pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
if not pd_below_1_1_2:
pcodes, puniques = pser.factorize(sort=True, na_sentinel=None)
kcodes, kuniques = psser.factorize(na_sentinel=None)
self.assert_eq(pcodes.tolist(), kcodes.to_list())
# puniques is Index(['a', 'b', nan], dtype='object')
self.assert_eq(ps.Index(["a", "b", None]), kuniques)
psser = ps.Series([1, 2, np.nan, 4, 5]) # Arrow takes np.nan as null
psser.loc[3] = np.nan # Spark takes np.nan as NaN
kcodes, kuniques = psser.factorize(na_sentinel=None)
pcodes, puniques = psser.to_pandas().factorize(sort=True, na_sentinel=None)
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
def test_pad(self):
pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x")
psser = ps.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pser.pad(), psser.pad())
# Test `inplace=True`
pser.pad(inplace=True)
psser.pad(inplace=True)
self.assert_eq(pser, psser)
else:
expected = ps.Series([np.nan, 2, 3, 4, 4, 6], name="x")
self.assert_eq(expected, psser.pad())
# Test `inplace=True`
psser.pad(inplace=True)
self.assert_eq(expected, psser)
def test_explode(self):
if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
pser = pd.Series([[1, 2, 3], [], None, [3, 4]])
psser = ps.from_pandas(pser)
self.assert_eq(pser.explode(), psser.explode(), almost=True)
# MultiIndex
pser.index = pd.MultiIndex.from_tuples([("a", "w"), ("b", "x"), ("c", "y"), ("d", "z")])
psser = ps.from_pandas(pser)
self.assert_eq(pser.explode(), psser.explode(), almost=True)
# non-array type Series
pser = pd.Series([1, 2, 3, 4])
psser = ps.from_pandas(pser)
self.assert_eq(pser.explode(), psser.explode())
else:
pser = pd.Series([[1, 2, 3], [], None, [3, 4]])
psser = ps.from_pandas(pser)
expected = pd.Series([1.0, 2.0, 3.0, None, None, 3.0, 4.0], index=[0, 0, 0, 1, 2, 3, 3])
self.assert_eq(psser.explode(), expected)
# MultiIndex
pser.index = pd.MultiIndex.from_tuples([("a", "w"), ("b", "x"), ("c", "y"), ("d", "z")])
psser = ps.from_pandas(pser)
expected = pd.Series(
[1.0, 2.0, 3.0, None, None, 3.0, 4.0],
index=pd.MultiIndex.from_tuples(
[
("a", "w"),
("a", "w"),
("a", "w"),
("b", "x"),
("c", "y"),
("d", "z"),
("d", "z"),
]
),
)
self.assert_eq(psser.explode(), expected)
# non-array type Series
pser = pd.Series([1, 2, 3, 4])
psser = ps.from_pandas(pser)
expected = pser
self.assert_eq(psser.explode(), expected)
def test_argsort(self):
# Without null values
pser = pd.Series([0, -100, 50, 100, 20], index=["A", "B", "C", "D", "E"])
psser = ps.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
# MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("a", "v"), ("b", "w"), ("c", "x"), ("d", "y"), ("e", "z")]
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
# With name
pser.name = "Koalas"
psser = ps.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
# Series from Index
pidx = pd.Index([4.0, -6.0, 2.0, -100.0, 11.0, 20.0, 1.0, -99.0])
psidx = ps.from_pandas(pidx)
self.assert_eq(
pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
)
self.assert_eq(
(-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
)
# Series from Index with name
pidx.name = "Koalas"
psidx = ps.from_pandas(pidx)
self.assert_eq(
pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
)
self.assert_eq(
(-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
)
# Series from DataFrame
pdf = pd.DataFrame({"A": [4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.A.argsort().sort_index(), psdf.A.argsort().sort_index())
self.assert_eq((-pdf.A).argsort().sort_index(), (-psdf.A).argsort().sort_index())
# With null values
pser = pd.Series([0, -100, np.nan, 100, np.nan], index=["A", "B", "C", "D", "E"])
psser = ps.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
# MultiIndex with null values
pser.index = pd.MultiIndex.from_tuples(
[("a", "v"), ("b", "w"), ("c", "x"), ("d", "y"), ("e", "z")]
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
# With name with null values
pser.name = "Koalas"
psser = ps.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
# Series from Index with null values
pidx = pd.Index([4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0])
psidx = ps.from_pandas(pidx)
self.assert_eq(
pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
)
self.assert_eq(
(-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
)
# Series from Index with name with null values
pidx.name = "Koalas"
psidx = ps.from_pandas(pidx)
self.assert_eq(
pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
)
self.assert_eq(
(-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
)
# Series from DataFrame with null values
pdf = pd.DataFrame({"A": [4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.A.argsort().sort_index(), psdf.A.argsort().sort_index())
self.assert_eq((-pdf.A).argsort().sort_index(), (-psdf.A).argsort().sort_index())
def test_argmin_argmax(self):
pser = pd.Series(
{
"Corn Flakes": 100.0,
"Almond Delight": 110.0,
"Cinnamon Toast Crunch": 120.0,
"Cocoa Puff": 110.0,
"Expensive Flakes": 120.0,
"Cheap Flakes": 100.0,
},
name="Koalas",
)
psser = ps.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
self.assert_eq(pser.argmin(), psser.argmin())
self.assert_eq(pser.argmax(), psser.argmax())
# MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("a", "t"), ("b", "u"), ("c", "v"), ("d", "w"), ("e", "x"), ("f", "u")]
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.argmin(), psser.argmin())
self.assert_eq(pser.argmax(), psser.argmax())
# Null Series
self.assert_eq(pd.Series([np.nan]).argmin(), ps.Series([np.nan]).argmin())
self.assert_eq(pd.Series([np.nan]).argmax(), ps.Series([np.nan]).argmax())
else:
self.assert_eq(pser.values.argmin(), psser.argmin())
self.assert_eq(pser.values.argmax(), psser.argmax())
# MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("a", "t"), ("b", "u"), ("c", "v"), ("d", "w"), ("e", "x"), ("f", "u")]
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.values.argmin(), psser.argmin())
self.assert_eq(pser.values.argmax(), psser.argmax())
# Null Series
self.assert_eq(-1, ps.Series([np.nan]).argmin())
self.assert_eq(-1, ps.Series([np.nan]).argmax())
with self.assertRaisesRegex(ValueError, "attempt to get argmin of an empty sequence"):
ps.Series([]).argmin()
with self.assertRaisesRegex(ValueError, "attempt to get argmax of an empty sequence"):
ps.Series([]).argmax()
def test_backfill(self):
pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x")
psser = ps.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pser.backfill(), psser.backfill())
# Test `inplace=True`
pser.backfill(inplace=True)
psser.backfill(inplace=True)
self.assert_eq(pser, psser)
else:
expected = ps.Series([2.0, 2.0, 3.0, 4.0, 6.0, 6.0], name="x")
self.assert_eq(expected, psser.backfill())
# Test `inplace=True`
psser.backfill(inplace=True)
self.assert_eq(expected, psser)
def test_align(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
psdf = ps.from_pandas(pdf)
for join in ["outer", "inner", "left", "right"]:
for axis in [None, 0]:
psser_l, psser_r = psdf.a.align(psdf.b, join=join, axis=axis)
pser_l, pser_r = pdf.a.align(pdf.b, join=join, axis=axis)
self.assert_eq(psser_l, pser_l)
self.assert_eq(psser_r, pser_r)
psser_l, psdf_r = psdf.b.align(psdf[["b", "a"]], join=join, axis=axis)
pser_l, pdf_r = pdf.b.align(pdf[["b", "a"]], join=join, axis=axis)
self.assert_eq(psser_l, pser_l)
self.assert_eq(psdf_r, pdf_r)
self.assertRaises(ValueError, lambda: psdf.a.align(psdf.b, axis=1))
def test_pow_and_rpow(self):
pser = pd.Series([1, 2, np.nan])
psser = ps.from_pandas(pser)
self.assert_eq(pser.pow(np.nan), psser.pow(np.nan))
self.assert_eq(pser ** np.nan, psser ** np.nan)
self.assert_eq(pser.rpow(np.nan), psser.rpow(np.nan))
self.assert_eq(1 ** pser, 1 ** psser)
def test_between_time(self):
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
pser = pd.Series([1, 2, 3, 4], index=idx)
psser = ps.from_pandas(pser)
self.assert_eq(
pser.between_time("0:15", "0:45").sort_index(),
psser.between_time("0:15", "0:45").sort_index(),
)
pser.index.name = "ts"
psser = ps.from_pandas(pser)
self.assert_eq(
pser.between_time("0:15", "0:45").sort_index(),
psser.between_time("0:15", "0:45").sort_index(),
)
pser.index.name = "index"
psser = ps.from_pandas(pser)
self.assert_eq(
pser.between_time("0:15", "0:45").sort_index(),
psser.between_time("0:15", "0:45").sort_index(),
)
def test_at_time(self):
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
pser = pd.Series([1, 2, 3, 4], index=idx)
psser = ps.from_pandas(pser)
self.assert_eq(
pser.at_time("0:20").sort_index(),
psser.at_time("0:20").sort_index(),
)
pser.index.name = "ts"
psser = ps.from_pandas(pser)
self.assert_eq(
pser.at_time("0:20").sort_index(),
psser.at_time("0:20").sort_index(),
)
pser.index.name = "index"
psser = ps.from_pandas(pser)
self.assert_eq(
pser.at_time("0:20").sort_index(),
psser.at_time("0:20").sort_index(),
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_series import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
NLeSC/embodied-emotions-scripts | embem/machinelearning/rakel_clf.py | 1 | 2054 | """Script to train a rakel classifier.
Usage: python br_rakel.py <input dir> <output dir>
Made for use by do_rakel.sh: ./do_rakel.sh <data dir>
The script expects train_<run>.txt and test_<run>.txt files (for runs 1-10)
containing the train and test sets in the data directory.
"""
from __future__ import print_function
import argparse
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from rakel import RandomKLabelsets
from mlutils import get_data, print_results, split
from nltk.corpus import stopwords as sw
import string
import os
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='directory containing the train and test'
' data')
parser.add_argument('out_dir', help='directory output should be saved to')
args = parser.parse_args()
stopwords = sw.words('dutch') + [p for p in string.punctuation]
out_dir = args.out_dir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
#classifier_dir = '{}/classifier/'.format(out_dir)
#if not os.path.exists(classifier_dir):
# os.makedirs(classifier_dir)
for run in range(1, 11):
print("Run", run)
train_file = '{}/train_{}.txt'.format(args.input_dir, run)
test_file = '{}/test_{}.txt'.format(args.input_dir, run)
out_file = '{}/output_{}.txt'.format(out_dir, run)
X_train, X_test, Y_train, Y_test, classes_ = get_data(train_file,
test_file)
#print(Y_train.shape)
clf = make_pipeline(TfidfVectorizer(analyzer=split,
stop_words=stopwords),
RandomKLabelsets(LinearSVC(class_weight='auto'),
n_estimators=Y_train.shape[1]*2,
labels_per_estimator=3))
clf.fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
print_results(Y_test, Y_pred, classes_, open(out_file, 'w'))
# save classifier
#joblib.dump(clf, '{}/classifier.pkl'.format(classifier_dir))
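    # Note: re-enabling the joblib dump above would also need joblib imported
    # (e.g. `from sklearn.externals import joblib`, or plain `import joblib` on
    # newer scikit-learn) and a per-run filename so later runs don't overwrite
    # the same pickle.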
| apache-2.0 |
mapagron/Boot_camp | hw3/hw3ch1v2.py | 1 | 4760 | #Code for Challenge#1_Homework3
#Financial analysis
#10_01_17: Result: The file is created but only has rows, it does not have any data
#Order of the task:
#1. Merging the files (importing libraries, open files, and creating a new file)
#1.1 To create the new file, split column 1 (date) on the separator "-" to get the month
#1.1 importing libraries
import os
import csv
#import pandas as pd
#import numpy as np
#import seaborn
#1.2 Importing data
#Name files with no spaces
data1 = os.path.join('Resources', 'budget_data_1.csv')
data2 = os.path.join('Resources', 'budget_data_2.csv')
#1.3 Creating lists to new file
#newdate = []
datemonth = []
dateyear = []
revenue = []
revenueTwo = []
#1.4 using with to create the new file
#first: testing with one
cvsfiles = [data1, data2]
for file in cvsfiles:
with open(file, newline="") as cvsfileone:
csvreaderone = csv.reader(cvsfileone, delimiter = ",")
#creating the for loop to read all the rows in budget_data_1
for row in csvreaderone:
            #Using .append to add whatever needs to be added to the lists date = [] and revenue = []
            #1.4.1 to count the month separately from the day, I will split the date
newdate = row[0].split("-")
#to test out if new date is actually splitting. The answer is YES!
#print(newdate)
#1.4.2 .append the revenue
revenue.append(row[1])
dateyear.append(newdate[0])
            # for this one I got an error: list index out of range - it was because the data files have different sizes
datemonth.append(newdate[1])
#printing variables
#Result : All printed - and working
#print(revenue)
#print(datemonth)
#print(dateyear)
#2. Calculate
#2.1 The total number of months included in the dataset
numberMonths = len(datemonth)
print (numberMonths)
#2.2 The total amount of revenue gained over the entire period
#removing header from revenue
#revenue.remove("Revenue")
#revenue.pop([0])
#print(revenue)
# changing revenue as integer
#list comprehension: for i (each value it finds)
# type ---- command to get the type of an object
for i in revenue:
if i == "Revenue":
revenue.remove("Revenue")
#print("found it")
else:
revenueTwo.append(int(i))
#print (revenueTwo)
#calculating total amount of revenue
TotalRevenue = sum(revenueTwo)
print (TotalRevenue)
#2.3 The average change in revenue between months over the entire period
#creating a function to calculate the average
def average (list1):
suma = sum((i) for i in list1)
#if suma == 0:
#suma = sum((i+1) for i in list1)
average = suma / len(list1)
return average
print (average(revenueTwo))
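#Note: average() above returns the mean of the revenue values themselves.
#A sketch of the average month-to-month *change* (assuming that is the metric
#the assignment asks for) would difference consecutive revenues first, e.g.:
#changes = [revenueTwo[i + 1] - revenueTwo[i] for i in range(len(revenueTwo) - 1)]
#averageChange = sum(changes) / len(changes)
#print (averageChange)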
#2.4 The greatest increase in revenue (date and amount) over the entire period
print (max(revenueTwo))
#2.5 The greatest decrease in revenue (date and amount) over the entire period
print (min(revenueTwo))
#1.5 Zipping into a new file
#Zip is an iterator in pandas so to create the data frame, I have to create them as lists.
#combinedcsv = list(zip(datemonth, dateyear, revenue))
#print(combinedcsv)
#trying without creating a list
'''
combinedcsv = zip(datemonth, dateyear, revenue)
# 1.6 Set variable for output file
outputfile = os.path.join("budget_data_final.csv")
# 1.7 Open the new file
# w: argument that allows writting
with open(outputfile, "w", newline="") as csvfinal:
writer = csv.writer(csvfinal)
#to add headers'
writer.writerow(["day", "month", "revenue"])
#write in zipped rows
writer.writerow(combinedcsv)
#Result - it is printing as rows not as columns (???)
#So how can I include the printing as rows ?
#writer.writerow(combinedcsv) // pd.DataFrame(combinedcsv)
'''
#3. Work with Pandas - not valid for this assignment
#This allows me to create columns
#dataframestocks = pd.DataFrame(combinedcsv)
#print(dataframestocks)
#Test_Pivot_Table
#dataframestocks.pivot(index='month', values='revenue')
#for i
#The total amount of revenue gained over the entire period
#totalAverage = sum(dataframestocks[3])
#print(totalAverage)
#sum(int(revenue)
#print(TotalRevenue)
#The average change in revenue between months over the entire period
#The greatest increase in revenue (date and amount) over the entire period
#The greatest decrease in revenue (date and amount) over the entire period
#QUESTIONS FOR CLASS
# Files written as columns - there is a simpler way?
# To do operations with columns by calling the column - see these two examples:
#The total number of months included in the dataset
'''
countMonth = 0
for i in dataframestocks:
if dataframestocks[0:i] > dataframestocks[0:i + 1]
countMonth = countMonth + 1
#The total amount of revenue gained over the entire period
for i in dataframestrocks:
totalAverage = sum(dataframestocks[3])
print(totalAverage)
'''
| gpl-3.0 |
sinhrks/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
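    # A minimal usage sketch (illustrative; mirrors the IncrementalPCA doctest
    # in transform() below):
    #
    #   ipca = IncrementalPCA(n_components=2).fit(X)  # X: (n_samples, n_features)
    #   cov = ipca.get_covariance()                   # shape (n_features, n_features)
    #   prec = ipca.get_precision()                   # inverse of cov (see get_precision below)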
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
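        # Woodbury identity as applied here, with W = components_ (rescaled when
        # whiten=True), D = diag(explained_variance_ - noise_variance_) and
        # s2 = noise_variance_:
        #   (W.T D W + s2 I)^-1 = I / s2 - W.T (D^-1 + W W.T / s2)^-1 W / s2**2
        # The block below builds this expression term by term.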
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
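# Illustrative sketch appended for exposition (not part of the original module):
# a numerical check of the generative-model covariance formula documented in
# ``get_covariance`` above,
#     cov = components_.T * S**2 * components_ + sigma2 * eye(n_features).
# All values below (sizes, variances) are made up for the demonstration.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 100000, 5, 2
    # Random orthonormal rows, playing the role of ``components_``
    components = linalg.qr(rng.randn(n_features, n_components))[0][:, :n_components].T
    exp_var = np.array([4.0, 2.0])   # S**2, the explained variances
    sigma2 = 0.5                     # isotropic noise variance
    # Draw data from the generative model x = W.T z + eps
    Z = rng.randn(n_samples, n_components) * np.sqrt(exp_var)
    X = np.dot(Z, components) + np.sqrt(sigma2) * rng.randn(n_samples, n_features)
    model_cov = np.dot(components.T * exp_var, components) + sigma2 * np.eye(n_features)
    empirical_cov = np.cov(X, rowvar=False)
    # The two matrices should agree up to sampling noise (max difference ~1e-2)
    print(np.max(np.abs(model_cov - empirical_cov)))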
| bsd-3-clause |
simontorres/bravo | gui/gui_con_pdm_play.py | 1 | 7734 | #import sys
import matplotlib
matplotlib.use('QT4Agg')
from matplotlib.widgets import Button
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import UnivariateSpline
from matplotlib.widgets import MultiCursor
import os
import argparse
from astropy.stats import LombScargle
def get_args(arguments=None):
parser = argparse.ArgumentParser(
description="Interactive tool to search for variable objects")
parser.add_argument('table',
action='store',
help="Table of photometry or radial velocity data.")
args = parser.parse_args(args=arguments)
return args
def cargar_datos(tabla):
data = np.genfromtxt(tabla)
data=data[data[:,0].argsort()]
jda = data[:,0]
maga = data[:,1]
erra = data[:,2]
return jda, maga, erra
"""
def calculo_fase(mag, date, er, per, T0):
fase2, tt2, err, t = [], [], [], []
for i in range(len(mag)):
fa2 = ((float(date[i]) * 1.0 - T0) / per) - int((float(date[i]) - T0) / per)
if fa2 > 0:
fase2.append(fa2)
tt2.append(fa2 + 1.0)
t.append(date[i])
err.append(er[i])
else:
fase2.append(fa2 + 1)
tt2.append(fa2 + 2)
t.append(date[i])
err.append(er[i])
fase2 = np.array(fase2)
tt2 = np.array(tt2)
err = np.array(err)
re2 = np.concatenate((fase2, tt2), axis=0)
mag3 = np.concatenate((mag, mag), axis=0)
t2 = np.concatenate((t, t), axis=0)
err2 = np.concatenate((err, err), axis=0)
return re2, mag3, t2, err2
"""
def calculo_fase(mag, date, er, per, T0):
fase2 = np.modf(date/per)[0]
tt2 = fase2+1
re2 = np.concatenate((fase2, tt2), axis=0)
mag3 = np.concatenate((mag, mag), axis=0)
t2 = np.concatenate((date, date), axis=0)
err2 = np.concatenate((er, er), axis=0)
return re2, mag3, t2, err2
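# Illustrative usage of calculo_fase (comment-only sketch, not part of the original
# script; the times, magnitudes, uncertainties and period below are made up):
#   t   = np.array([0.0, 5.0, 12.5, 30.0])     # observation times (days)
#   mag = np.array([10.0, 10.2, 9.9, 10.1])    # magnitudes
#   err = np.array([0.05, 0.05, 0.05, 0.05])   # uncertainties
#   fase, mag2, t2, err2 = calculo_fase(mag, t, err, per=10.0, T0=0)
#   # fase[:4] -> [0.0, 0.5, 0.25, 0.0], the fractional phase of each time.
# The arrays are duplicated with phase+1 so the folded curve can be plotted over
# the interval [0, 2); note that T0 is not used by this vectorised version.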
def spline(jda,maga,orden,splineYes=True):
spl = UnivariateSpline(jda, maga, k=orden)
return spl,splineYes
class GuiExample(object):
def __init__(self):
self.tabla = None
self.fig = None
self.ax1 = None
self.ax2 = None
self.ax3 = None
self.ax1_bb = None
self.line_plot = None
self.jda = None
self.maga = None
self.erra = None
self.per = None
self.t0 = None
self.min_per = 0.1
self.max_per = 10.0
# self.step_per = 80000.0
self.step_pdm = 5000.0
self.periodos = None
self.omega = None
self.PS = None
self.model = None
self.power = None
self.freqs = None
self.spl = None
self.splineYes = None
self.multi = None
self.name_star = True
self.auto_per=None
def __call__(self, tabla, *args, **kwargs):
self.tabla = tabla
self.jda, self.maga, self.erra = cargar_datos(self.tabla)
self.fig, (self.ax1, self.ax2 , self.ax3) = plt.subplots(nrows=3)
self.fig.subplots_adjust(hspace=0.27, bottom=0.07, top=0.99, left=0.08, right=0.98)
self.multi = MultiCursor(self.fig.canvas, (self.ax1,self.ax2), color='r', \
lw=.5, horizOn=None, vertOn=True)
manager = plt.get_current_fig_manager()
manager.window.showMaximized()
#jd/mag
if self.splineYes == True:
self.spl, self.splineYes = spline(self.jda, self.maga, 5)
print "Aplicando spline"
self.maga = self.maga - self.spl(self.jda)
#self.ax1.plot(self.jda, self.spl(self.jda), 'r--', lw=3)
self.spl, self.splineYes = spline(self.jda, self.maga, 5)
self.ax1.plot(self.jda, self.spl(self.jda), 'r--', lw=3)
self.maga = self.maga - self.spl(self.jda)
else:
print "NO aplicando spline"
self.spl,self.splineYes = spline(self.jda, self.maga,5)
self.ax1.plot(self.jda, self.spl(self.jda), 'r--', lw=3)
self.ax1.plot(self.jda, self.maga, '.', c='black')
self.ax1.set_ylim(max(self.maga+0.01), min(self.maga)-0.01)
self.ax1.set_xlim(min(self.jda)-10, max(self.jda)+0.01)
self.ax1.set_xlabel(r'Time', fontsize=20)
self.ax1.set_ylabel(r"Magnitud", fontsize=20)
if self.name_star!=True:
self.ax1.text(0.03, 0.145, "%s"%self.name_star, ha='left', va='top', \
transform=self.ax1.transAxes, fontsize=25,color="red")
#LS astropy
frequency2, power3 = LombScargle(self.jda, self.maga, self.erra).autopower\
(minimum_frequency=1 / self.max_per,maximum_frequency=1 / self.min_per)
self.ax2.plot(1.0 / frequency2, power3, '-', c='cyan', lw=1, zorder=1, \
label="PyLS")
if self.auto_per == True:
self.step_per = len(frequency2)
"""
#GLS
self.periodos=np.linspace(self.min_per, self.max_per, self.step_per)
self.omega = 2 * np.pi / self.periodos
self.PS = lomb_scargle(self.jda, self.maga, self.erra, self.omega, generalized=True)
self.ax2.plot(self.periodos, self.PS, '-', c='black', lw=1, zorder=1,label="GLS")
#LS
model = periodic.LombScargle().fit(self.jda, self.maga, self.erra)
fmin = 1.0 / self.max_per
fmax = 1.0 / self.min_per
df = (fmax - fmin) / self.step_per
self.power = model.score_frequency_grid(fmin, df, self.step_per)
self.freqs = fmin + df * np.arange(self.step_per)
self.ax2.plot(1.0/self.freqs, self.power, '-', c='red', lw=1, zorder=1,label="LS")
self.ax2.legend(fontsize = 'x-large')
"""
#PDM
os.system("awk '{print $1,$2,$3}' %s > borrar.dat"%tabla)
self.tabla="borrar.dat"
longi=open(self.tabla, 'r').read().count("\n")
pdm1=float(os.popen("./pdmmm %s %0.1f %0.3f %0.3f 10 5 %0.3f"%(self.tabla,longi,self.min_per,self.max_per,self.step_pdm)).readlines()[0])
f_11=np.genfromtxt(self.tabla+".pdm")
Pdm1t,Ppdm1=f_11[:,0],1./f_11[:,1]
# print len(f_11[0]),1./min(f_11[1])
self.ax2.plot(Ppdm1, Pdm1t, '-', c='green', lw=1, zorder=1,label="PDM",alpha=0.8)
###
self.ax2.legend(fontsize = 'x-large')
self.ax2.set_xlabel(r'Periodo', fontsize=20)
self.ax2.set_ylabel(r"Power", fontsize=20)
self.ax2.set_xlim(self.min_per, self.max_per)
self.ax1_bb = self.ax2.get_position()
self.fig.canvas.mpl_connect('motion_notify_event', self.on_mouse_over)
plt.tight_layout()
plt.show()
def on_mouse_over(self, event):
ax1_x, ax1_y = \
self.fig.transFigure.inverted().transform((event.x, event.y))
if self.ax1_bb.contains(ax1_x, ax1_y):
if self.line_plot is not None:
try:
self.line_plot.remove()
self.ax3.relim()
except:
pass
if event.ydata is not None:
self.per=event.xdata
print event.xdata
self.t0=0
fasA,magniA,t_A,er_A=calculo_fase(self.maga, self.jda, self.erra, self.per, self.t0)
self.line_plot, = self.ax3.plot(fasA,magniA,".",color='grey',alpha=0.2198)
self.ax3.set_xlim(0,2)
self.ax3.set_ylim(max(magniA+0.01), min(magniA)-0.01)
self.fig.canvas.draw()
self.ax3.set_xlabel(r'Fase', fontsize=20)
self.ax3.set_ylabel(r"Magnitud", fontsize=20)
if __name__ == '__main__':
args = get_args()
# print(args.table)
gui = GuiExample()
gui(tabla=args.table)
| gpl-3.0 |
easonlius/fingercode | run.py | 1 | 1550 | # -*- coding: utf-8 -*-
# description : run the database to get the result
import cv2
import matplotlib.pyplot as plt
from DB import *
from fingercode import *
db_name = "final.db"
result = get_all(db_name)
images = get_image_name(result)
counts = []
for i in result:
counts.append(convert_list(i))
# for all the results, calculate the Euler distance
correct_images = []
all_temp = []
for i in range(len(counts)):
images_temp = []
for j in range(len(counts)):
if images[i] != images[j]:
if images[j] not in all_temp:
temp = cal_euler_distance(counts[i], counts[j])
#print temp
if (temp < 3000):
print temp
all_temp.append(images[j])
print images[i], images[j]
images_temp.append(images[j])
#counts.remove(counts[j])
img1 = cv2.imread(images[i])
img2 = cv2.imread(images[j])
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(img1)
plt.subplot(1, 2, 2)
plt.imshow(img2)
plt.show()
correct_images.append(images_temp)
print len(correct_images)
"""
jk = []
for i in correct_images:
if len(i) > 1:
jk.append(i)
rows = len(jk)
for i in jk:
plt.figure()
index = 1
for j in i:
plt.subplot(rows, 4, index)
img = cv2.imread(j)
plt.imshow(img)
index += 1
plt.show()
""" | mit |
lin-credible/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
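    # One possible solution sketch for the TASK items above (not part of the
    # original skeleton; the vectorizer thresholds, the LinearSVC classifier and
    # its C value are illustrative choices, not the canonical answer):
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    # Cross-validated scores for each parameter setting explored by the grid search
    for params, mean_score, scores in grid_search.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params))
    # Predictions on the held-out test documents
    y_predicted = grid_search.predict(docs_test)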
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
osvaldomx/Modelos | LSTM_CONV/lstm.py | 1 | 10725 | # coding: utf-8
import numpy as np
import scipy.io as sio
from scipy.interpolate import griddata
from functools import reduce
from keras.models import Model
from keras.layers import Dropout, Dense, Input, Flatten, Permute, Reshape, concatenate
from keras.layers.convolutional import Conv2D, Conv1D
from keras.layers.pooling import MaxPooling2D
from keras.layers.recurrent import LSTM
from keras.optimizers import adam
from keras.utils import plot_model, to_categorical
from utils import cart2sph, pol2cart, augment_EEG, reformatInput
from sklearn.preprocessing import scale
def azim_proj(pos):
"""
Computes the Azimuthal Equidistant Projection of input point in 3D Cartesian Coordinates.
Imagine a plane being placed against (tangent to) a globe. If
a light source inside the globe projects the graticule onto
the plane the result would be a planar, or azimuthal, map
projection.
:param pos: position in 3D Cartesian coordinates
:return: projected coordinates using Azimuthal Equidistant Projection
"""
[r, elev, az] = cart2sph(pos[0], pos[1], pos[2])
return pol2cart(az, np.pi / 2 - elev)
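# Illustrative usage of azim_proj (comment-only sketch, not part of the original
# module; the coordinate is made up and cart2sph/pol2cart follow the conventions
# of the local ``utils`` module):
#   xyz = [0.0, 0.0, 1.0]        # an electrode at the very top of the head
#   x2d, y2d = azim_proj(xyz)    # -> approximately (0, 0), the map centre
# Electrodes on the "equator" (elevation 0) map to a circle of radius ~pi/2, so
# angular distance from the top is preserved along every azimuth, which is the
# defining property of the azimuthal equidistant projection.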
def gen_images(locs, features, n_gridpoints, normalize=True,
augment=False, pca=False, std_mult=0.1, n_components=2, edgeless=False):
"""
Generates EEG images given electrode locations in 2D space and multiple feature values for each electrode
:param locs: An array with shape [n_electrodes, 2] containing X, Y
coordinates for each electrode.
:param features: Feature matrix as [n_samples, n_features]
Features are as columns.
Features corresponding to each frequency band are concatenated.
(alpha1, alpha2, ..., beta1, beta2,...)
:param n_gridpoints: Number of pixels in the output images
:param normalize: Flag for whether to normalize each band over all samples
:param augment: Flag for generating augmented images
:param pca: Flag for PCA based data augmentation
    :param std_mult: Multiplier for std of added noise
:param n_components: Number of components in PCA to retain for augmentation
:param edgeless: If True generates edgeless images by adding artificial channels
at four corners of the image with value = 0 (default=False).
:return: Tensor of size [samples, colors, W, H] containing generated
images.
"""
feat_array_temp = []
nElectrodes = locs.shape[0] # Number of electrodes
# Test whether the feature vector length is divisible by number of electrodes
# assert features.shape[1] % nElectrodes == 0
n_colors = int(features.shape[1] / nElectrodes)
for c in range(n_colors):
feat_array_temp.append(features[:, c * nElectrodes: nElectrodes * (c + 1)])
if augment:
if pca:
for c in range(n_colors):
feat_array_temp[c] = augment_EEG(feat_array_temp[c], std_mult, pca=True, n_components=n_components)
else:
for c in range(n_colors):
feat_array_temp[c] = augment_EEG(feat_array_temp[c], std_mult, pca=False, n_components=n_components)
nSamples = features.shape[0]
# Interpolate the values
grid_x, grid_y = np.mgrid[
min(locs[:, 0]):max(locs[:, 0]):n_gridpoints * 1j,
min(locs[:, 1]):max(locs[:, 1]):n_gridpoints * 1j
]
temp_interp = []
for c in range(n_colors):
temp_interp.append(np.zeros([nSamples, n_gridpoints, n_gridpoints]))
# Generate edgeless images
if edgeless:
min_x, min_y = np.min(locs, axis=0)
max_x, max_y = np.max(locs, axis=0)
locs = np.append(locs, np.array([[min_x, min_y], [min_x, max_y], [max_x, min_y], [max_x, max_y]]), axis=0)
for c in range(n_colors):
feat_array_temp[c] = np.append(feat_array_temp[c], np.zeros((nSamples, 4)), axis=1)
# Interpolating
for i in xrange(nSamples):
for c in range(n_colors):
temp_interp[c][i, :, :] = griddata(locs, feat_array_temp[c][i, :], (grid_x, grid_y),
method='cubic', fill_value=np.nan)
        print('Interpolating {0}/{1}\r'.format(i + 1, nSamples))
# Normalizing
for c in range(n_colors):
if normalize:
temp_interp[c][~np.isnan(temp_interp[c])] = \
scale(temp_interp[c][~np.isnan(temp_interp[c])])
temp_interp[c] = np.nan_to_num(temp_interp[c])
return np.swapaxes(np.asarray(temp_interp), 0, 1) # swap axes to have [samples, colors, W, H]
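# Illustrative call of gen_images (comment-only sketch, not part of the original
# module; the shapes are made up to show the expected layout):
#   locs_2d = np.random.rand(32, 2)        # 32 electrodes projected to 2D
#   feats = np.random.rand(100, 32 * 3)    # 100 samples, 3 frequency bands each
#   imgs = gen_images(locs_2d, feats, 16)
#   # imgs.shape -> (100, 3, 16, 16), i.e. [samples, colors/bands, W, H]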
def build_cnn(input_var=None, n_layers=(4, 2, 1), n_filters_first=32,
imsize=32, n_colors=3):
"""
:param input_var:
:param n_layers:
:param n_filters_first:
:param imsize:
:param n_colors:
:return:
"""
count = 0
# Input layer
network = Input(shape=(n_colors, imsize, imsize), tensor=input_var)
for i, s in enumerate(n_layers):
for l in range(s):
network = Conv2D(n_filters_first * (2 ** i), (3, 3), padding='same',
data_format='channels_first')(network)
count += 1
#weights.append(network.weights)
network = MaxPooling2D(pool_size=(2, 2))(network)
return network
if __name__ == '__main__':
# Load electrode locations
print('Loading data...')
locs = sio.loadmat('sample_data/Neuroscan_locs_orig.mat')
locs_3d = locs['A']
locs_2d = []
# Convert to 2D
for e in locs_3d:
locs_2d.append(azim_proj(e))
feats = sio.loadmat('sample_data/FeatureMat_timeWin.mat')['features']
subj_nums = np.squeeze(sio.loadmat('sample_data/trials_subNums.mat')['subjectNum'])
# Leave-Subject-Out cross validation
fold_pairs = []
for i in np.unique(subj_nums):
ts = subj_nums == i
tr = np.squeeze(np.nonzero(np.bitwise_not(ts)))
ts = np.squeeze(np.nonzero(ts))
np.random.shuffle(tr) # Shuffle indices
np.random.shuffle(ts)
fold_pairs.append((tr, ts))
# Conv-LSTM Mode
print('Generating images for all time windows...')
#images_timewin = np.array(
# [gen_images(np.array(locs_2d),
# feats[:, i * 192:(i + 1) * 192], 32,
# normalize=False)
# for i in range(feats.shape[1] / 192)])
images_timewin = np.load('sample_data/images_timewin.npy')
num_classes = 4
grad_clip = 100
imsize = 32
n_colors = 3
n_timewin = 3
labels = np.squeeze(feats[:, -1]) - 1
images = images_timewin
fold = fold_pairs[2]
(X_train, y_train), (X_val, y_val), (X_test, y_test) = reformatInput(images, labels, fold)
X_train = X_train.astype("float32", casting='unsafe')
X_val = X_val.astype("float32", casting='unsafe')
X_test = X_test.astype("float32", casting='unsafe')
print('Building LSTM-Conv Model')
main_input = Input(shape=(None, 3, imsize, imsize))
convnets = []
# Build 7 parallel CNNs with shared weights
#for i in range(n_timewin):
# convnet = build_cnn(main_input[i], imsize=imsize, n_colors=n_colors)
# convnets.append(Flatten()(convnet))
in1 = Input(shape=(3,imsize,imsize))
net1 = Conv2D(32, 3, padding='same', data_format='channels_first')(in1)
net1 = Conv2D(32, 3, padding='same', data_format='channels_first')(net1)
net1 = Conv2D(32, 3, padding='same', data_format='channels_first')(net1)
net1 = Conv2D(32, 3, padding='same', data_format='channels_first')(net1)
net1 = MaxPooling2D(pool_size=(2, 2))(net1)
net1 = Conv2D(64, 3, padding='same', data_format='channels_first')(net1)
net1 = Conv2D(64, 3, padding='same', data_format='channels_first')(net1)
net1 = MaxPooling2D(pool_size=(2, 2))(net1)
net1 = Conv2D(128, 3, padding='same', data_format='channels_first')(net1)
net1 = MaxPooling2D(pool_size=(2, 2))(net1)
net1 = Flatten()(net1)
in2 = Input(shape=(3, imsize, imsize))
net2 = Conv2D(32, 3, padding='same', data_format='channels_first')(in2)
net2 = Conv2D(32, 3, padding='same', data_format='channels_first')(net2)
net2 = Conv2D(32, 3, padding='same', data_format='channels_first')(net2)
net2 = Conv2D(32, 3, padding='same', data_format='channels_first')(net2)
net2 = MaxPooling2D(pool_size=(2, 2))(net2)
net2 = Conv2D(64, 3, padding='same', data_format='channels_first')(net2)
net2 = Conv2D(64, 3, padding='same', data_format='channels_first')(net2)
net2 = MaxPooling2D(pool_size=(2, 2))(net2)
net2 = Conv2D(128, 3, padding='same', data_format='channels_first')(net2)
net2 = MaxPooling2D(pool_size=(2, 2))(net2)
net2 = Flatten()(net2)
in3 = Input(shape=(3, imsize, imsize))
net3 = Conv2D(32, 3, padding='same', data_format='channels_first')(in3)
net3 = Conv2D(32, 3, padding='same', data_format='channels_first')(net3)
net3 = Conv2D(32, 3, padding='same', data_format='channels_first')(net3)
net3 = Conv2D(32, 3, padding='same', data_format='channels_first')(net3)
net3 = MaxPooling2D(pool_size=(2, 2))(net3)
net3 = Conv2D(64, 3, padding='same', data_format='channels_first')(net3)
net3 = Conv2D(64, 3, padding='same', data_format='channels_first')(net3)
net3 = MaxPooling2D(pool_size=(2, 2))(net3)
net3 = Conv2D(128, 3, padding='same', data_format='channels_first')(net3)
net3 = MaxPooling2D(pool_size=(2, 2))(net3)
net3 = Flatten()(net3)
#convpool = concatenate(convnets)
convpool = concatenate([net1, net2, net3])
convpool = Reshape((n_timewin, -1))(convpool)
conv_out = Permute((2,1))(convpool)
conv_out = Conv1D(64, 3)(conv_out)
conv_out = Flatten()(conv_out)
lstm_out = LSTM(128, activation='tanh')(convpool)
dense_input = concatenate([lstm_out, conv_out])
main_output = Dropout(0.5)(dense_input)
main_output = Dense(512, activation='relu')(main_output)
main_output = Dense(num_classes, activation='softmax')(main_output)
lstm_conv = Model(inputs=[in1, in2, in3], outputs=main_output)
#plot_model(lstm_conv, 'mix.png')
opt = adam(clipvalue=100)
lstm_conv.compile(optimizer=opt,
loss='categorical_crossentropy',
metrics=['accuracy'])
y_train_cat = to_categorical(y_train)
y_val_cat = to_categorical(y_val)
y_test_cat = to_categorical(y_test)
num_epochs = 5
print('Training the LSTM-CONV Model...')
X_train = [X_train[0], X_train[1], X_train[2]]
lstm_conv.fit(X_train, y_train_cat, epochs=num_epochs)
print('Done!')
| gpl-3.0 |
velizarefremov/vascularnetworks | skeletonize.py | 1 | 1055 | import os
import sys
import numpy as np
from itkutilities import get_itk_array, write_itk_imageArray
import utility
from skimage.morphology import skeletonize_3d
import matplotlib.pyplot as plt
display = False
if len(sys.argv) != 3:
print("Usage: " + sys.argv[0] + " <testData> <output>")
sys.exit(1)
inputfile = sys.argv[1]
outputfile = sys.argv[2]
inputimg = np.array(get_itk_array(inputfile), dtype="uint8")
# perform skeletonization
skeleton = skeletonize_3d(inputimg)
if display == True:
# display results
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4),
sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax = axes.ravel()
ax[0].imshow(inputimg[55], cmap=plt.cm.gray)
ax[0].axis('off')
ax[0].set_title('original', fontsize=20)
ax[1].imshow(skeleton[55], cmap=plt.cm.gray)
ax[1].axis('off')
ax[1].set_title('skeleton', fontsize=20)
fig.tight_layout()
plt.show()
write_itk_imageArray(skeleton, outputfile)
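# Example invocation (illustrative; the file names are placeholders for an
# ITK-readable segmented volume and the desired output path):
#   python skeletonize.py segmented_vessels.nii skeleton.nii
# Set ``display = True`` above to also show slice 55 of the input next to its skeleton.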
| gpl-3.0 |
jdavidrcamacho/Tests_GP | 02 - Programs being tested/06 - spots tests/Test_ES_3spots.py | 1 | 8206 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 3 17:46:39 2017
@author: camacho
"""
import Kernel;reload(Kernel);kl=Kernel
import Kernel_likelihood;reload(Kernel_likelihood);lk=Kernel_likelihood
import Kernel_optimization;reload(Kernel_optimization);opt=Kernel_optimization
import RV_function;reload(RV_function);RVfunc=RV_function
import numpy as np;np.random.seed(1234)
import matplotlib.pylab as pl
import astropy.table as Table
import sys
f=open("Test_ES_3spots.txt","w")
sys.stdout = f
pl.close('all')
##### spots data pre-processing #####
rdb_data=Table.Table.read('3spots.rdb',format='ascii')
RV_spot=rdb_data['RV_tot'][1:101]
RV_spot=np.array(RV_spot)
RV_spot=RV_spot.astype('Float64')
RV_SPOT=np.concatenate((RV_spot,RV_spot,RV_spot,RV_spot),axis=0)
spots_yy=[]
for i in np.arange(4,401,4):
a=(RV_SPOT[i-4]+RV_SPOT[i-3]+RV_SPOT[i-2]+RV_SPOT[i-1])*1000/4.
spots_yy.append(a)
spots_data=[]
for j in np.arange(1,100,3.3):
spots_data.append(spots_yy[int(round(j))])
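# The two loops above first average the spot RVs in blocks of 4 consecutive samples
# (scaled by 1000, presumably km/s -> m/s), then pick ~30 roughly evenly spaced
# values so the spot signal matches the 30 epochs of the simulated RV curve below.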
##### data and plot #####
# Period(P) ~ 20 to 50 days
# Observations(space) ~ every 4 days
# Error(yerr) ~ 0.20 to 0.50 m
# K=17.353 => planet with 1/4 mass of Jupiter
test1=RVfunc.RV_circular(P=25,K=17.353,T=0,gamma=0,time=100,space=30)
t=np.linspace(0,100,30) #np.linspace(0,time,space)
y0=np.array(test1[1])
yerr=np.array([np.random.uniform(0.2,0.5) for x in y0])
y=np.array([x1+x2 for x1,x2 in zip(y0,spots_data)])
total=np.array([x1+x2 for x1,x2 in zip(y,yerr)])
Xfinal=t
Yfinal=total
##### Let's try GP to fit #####
#kl.ExpSineSquared(theta,l,P) + kl.WhiteNoise(theta)
def sub_tests(trials=20,variation=-0.1):
theta=17.0;l=1.0;P=25.0
for i in range(1,trials+1):
print 'EXAMPLE %i' %i
kernel1=kl.ExpSquared(theta, l)
print 'initial kernel ->', kernel1
likelihood1=lk.likelihood(kernel1,Xfinal,Xfinal,Yfinal,yerr)
print 'initial likelihood ->', likelihood1
Xcalc=np.linspace(0,100,1000)
# [mu,std]=lk.compute_kernel(kernel1,Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
optimization1=opt.committed_optimization(kernel1,Xfinal,Xfinal,Yfinal,yerr,max_opt=10)
print 'final kernel ->',optimization1[1]
print 'final likelihood ->', optimization1[0]
# [mu,std]=lk.compute_kernel(optimization1[1],Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
theta=theta+variation;l=l;P=P+variation
def subNoise_tests(trials=20,variation=-0.1):
theta=17.0;l=1.0;P=24.0;noise=0.5
for i in range(1,trials+1):
print 'EXAMPLE %i' %i
kernel1=kl.ExpSquared(theta, l)+kl.WhiteNoise(noise)
print 'initial kernel ->', kernel1
likelihood1=lk.likelihood(kernel1,Xfinal,Xfinal,Yfinal,yerr)
print 'initial likelihood ->', likelihood1
Xcalc=np.linspace(0,100,1000)
# [mu,std]=lk.compute_kernel(kernel1,Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
optimization1=opt.committed_optimization(kernel1,Xfinal,Xfinal,Yfinal,yerr,max_opt=10)
print 'final kernel ->',optimization1[1]
print 'final likelihood ->', optimization1[0]
# [mu,std]=lk.compute_kernel(optimization1[1],Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
theta=theta+variation;l=l;P=P+variation;noise=noise+(variation/2.)
def add_tests(trials=20,variation=0.1):
theta=17.0;l=1.0;P=25.0
for i in range(1,trials+1):
print 'EXAMPLE %i' %i
kernel1=kl.ExpSquared(theta, l)
print 'initial kernel ->', kernel1
likelihood1=lk.likelihood(kernel1,Xfinal,Xfinal,Yfinal,yerr)
print 'initial likelihood ->', likelihood1
Xcalc=np.linspace(0,100,1000)
# [mu,std]=lk.compute_kernel(kernel1,Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
optimization1=opt.committed_optimization(kernel1,Xfinal,Xfinal,Yfinal,yerr,max_opt=10)
print 'final kernel ->',optimization1[1]
print 'final likelihood ->', optimization1[0]
# [mu,std]=lk.compute_kernel(optimization1[1],Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
theta=theta+variation;l=l;P=P+variation
def addNoise_tests(trials=20,variation=0.1):
theta=17.0;l=1.0;P=24.0;noise=0.1
for i in range(1,trials+1):
print 'EXAMPLE %i' %i
kernel1=kl.ExpSquared(theta, l)+kl.WhiteNoise(noise)
print 'initial kernel ->', kernel1
likelihood1=lk.likelihood(kernel1,Xfinal,Xfinal,Yfinal,yerr)
print 'initial likelihood ->', likelihood1
Xcalc=np.linspace(0,100,1000)
# [mu,std]=lk.compute_kernel(kernel1,Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
optimization1=opt.committed_optimization(kernel1,Xfinal,Xfinal,Yfinal,yerr,max_opt=10)
print 'final kernel ->',optimization1[1]
print 'final likelihood ->', optimization1[0]
# [mu,std]=lk.compute_kernel(optimization1[1],Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
theta=theta;l=l;noise=noise+(variation/2.)
sub_tests()
print ''
subNoise_tests()
print ''
add_tests()
print ''
addNoise_tests()
print ''
#for when everything ends
f.close() | mit |
musically-ut/numpy | numpy/lib/recfunctions.py | 148 | 35012 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested structures
    are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False},
        Whether to collapse nested fields.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something,
    * depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
# Minimal processing needed: just make sure everythng's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
    asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
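    Examples
    --------
    Illustrative usage (the field names and values below are made up):
    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
    >>> b = rfn.append_fields(a, 'C', data=[100, 200], usemask=False)
    >>> b.dtype.names
    ('A', 'B', 'C')
    >>> b['C'].tolist()
    [100, 200]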
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
Superposes arrays fields by fields
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords is
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of string corresponding
to the fields used to join the array. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
        r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
        in r2 but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
        in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords is
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
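    Examples
    --------
    Illustrative usage (the keys and values below are made up):
    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.array([(1, 10.), (2, 20.), (3, 30.)],
    ...              dtype=[('key', int), ('value1', float)])
    >>> b = np.array([(1, 100.), (3, 300.), (4, 400.)],
    ...              dtype=[('key', int), ('value2', float)])
    >>> joined = rfn.join_by('key', a, b, jointype='inner', usemask=False)
    >>> joined['key'].tolist()
    [1, 3]
    >>> joined['value2'].tolist()
    [100.0, 300.0]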
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.extend(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
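# A minimal usage sketch (hypothetical arrays, not part of the original
# module): join two structured arrays that share the field 'key'.
# >>> import numpy as np
# >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('x', float)])
# >>> b = np.array([(1, 5), (3, 7)], dtype=[('key', int), ('y', int)])
# >>> rec_join('key', a, b, jointype='inner')  # recarray with fields key, x, y
# With jointype='outer' the unmatched keys (2 and 3 here) are kept as well,
# and their missing fields are filled from `defaults` where given.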
| bsd-3-clause |
alexeyum/scikit-learn | examples/text/document_classification_20newsgroups.py | 37 | 10499 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
tkchafin/mrbait | mrbait/mrbait_corefuncs_parallel.py | 1 | 18041 | #!/usr/bin/python
import sys
import sqlite3
import getopt
import Bio
import os
import time
from Bio import AlignIO
from mrbait import mrbait_menu
from mrbait import substring
from mrbait.substring import SubString
from functools import partial
from mrbait import manage_bait_db as m
from mrbait import alignment_tools as a
from mrbait import sequence_tools as s
from mrbait import misc_utils as utils
from mrbait import seq_graph as graph
from mrbait import aln_file_tools
from mrbait import vcf_tools
from mrbait import vsearch
from mrbait import gff3_parser as gff
from mrbait import blast as b
import subprocess
import pandas as pd
import numpy as np
import multiprocessing
"""
Parallel versions of some of the MrBait corefuncs.
Much thanks to SO user 'dano' for 2014 post on how to share lock in multiprocessing pool:
https://stackoverflow.com/questions/25557686/python-sharing-a-lock-between-processes
"""
#Function to load a GFF file into database
def loadGFF_parallel(conn, params):
t = int(params.threads)
#file chunker call
file_list = aln_file_tools.generic_chunker(params.gff, t, params.workdir)
#print("Files are:",file_list)
#Initialize multiprocessing pool
#if 'lock' not in globals():
lock = multiprocessing.Lock()
try:
with multiprocessing.Pool(t,initializer=init, initargs=(lock,)) as pool:
			func = partial(loadGFF_worker, params.db)
results = pool.map(func, file_list)
except Exception as e:
pool.close()
pool.close()
pool.join()
#reset_lock()
#Remove chunkfiles
aln_file_tools.removeChunks(params.workdir)
#worker function version of loadGFF
def loadGFF_worker(db, chunk):
try:
connection = sqlite3.connect(db)
#For each GFF record in params.gff
for record in gff.read_gff(chunk):
#Skip any records that are missing the sequence ID, or coordinates
if record.seqid == "NULL" or record.start == "NULL" or record.end == "NULL":
continue
if record.start > record.end:
temp = record.start
record.start = record.end
record.end = temp
#Get the alias, if it exists
alias = ""
if record.getAlias(): #returns false if no alias
alias = record.getAlias()
else:
alias = "NULL"
#NOTE: This function ONLY inserts GFFRecords where record.seqid matches an existing locus in the loci table
lock.acquire()
m.add_gff_record(connection, record.seqid, record.type.lower(), record.start, record.end, alias)
lock.release()
connection.close()
except Exception as e:
		raise Exception(str(e))
#Function to load a BED file into database
def loadBED_parallel(conn, params):
t = int(params.threads)
#file chunker call
file_list = aln_file_tools.generic_chunker(params.bed, t, params.workdir)
#print("Files are:",file_list)
#Initialize multiprocessing pool
#if 'lock' not in globals():
lock = multiprocessing.Lock()
try:
with multiprocessing.Pool(t,initializer=init, initargs=(lock,)) as pool:
func = partial(loadBED_worker, params.db, params.bed_header)
results = pool.map(func, file_list)
except Exception as e:
pool.close()
pool.close()
pool.join()
#reset_lock()
#Remove chunkfiles
aln_file_tools.removeChunks(params.workdir)
#worker function version of loadBED
def loadBED_worker(db, bed_header, chunk):
try:
connection = sqlite3.connect(db)
		with open(chunk) as f:
count=0
for line in f:
line = line.strip()
if not line:
continue
count+=1
if count <= bed_header:
continue
content = line.split()
#NOTE: This function ONLY inserts BEDRecords where record.seqid matches an existing locus in the loci table
lock.acquire()
print(content)
m.add_bed_record(connection, content[0], content[1], content[2])
lock.release()
connection.close()
except Exception as e:
		raise Exception(str(e))
#Function to load BED file
def loadBED(conn, params):
with open(params.bed)as f:
count=0
for line in f:
line = line.strip()
if not line:
continue
count+=1
if count <= params.bed_header:
continue
content = line.split()
#NOTE: This function ONLY inserts BEDRecords where record.seqid matches an existing locus in the loci table
m.add_bed_record(conn, content[0], content[1], content[2])
#remove BED records not falling within our loci
#print(m.getBED(conn))
m.validateBEDRecords(conn)
#print(m.getBED(conn))
#Function to load an XMFA file into database
def loadXMFA_parallel(conn, params):
t = int(params.threads)
numLoci = aln_file_tools.countXMFA(params.xmfa)
if numLoci < 10000:
print("\t\t\tReading",numLoci,"alignments.")
else:
print("\t\t\tReading",numLoci,"alignments... This may take a while.")
#file chunker call
file_list = aln_file_tools.xmfa_chunker(params.xmfa, t, params.workdir)
#print("Files are:",file_list)
#Initialize multiprocessing pool
#if 'lock' not in globals():
lock = multiprocessing.Lock()
try:
with multiprocessing.Pool(t,initializer=init, initargs=(lock,)) as pool:
func = partial(loadXMFA_worker, params.db, params.cov, params.minlen, params.thresh, params.mask, params.maf)
results = pool.map(func, file_list)
except Exception as e:
pool.close()
pool.close()
pool.join()
#reset_lock()
#Remove chunkfiles
aln_file_tools.removeChunks(params.workdir)
#worker function version of loadXMFA
def loadXMFA_worker(db, params_cov, params_minlen, params_thresh, params_mask, params_maf, chunk):
try:
connection = sqlite3.connect(db)
#Parse MAF file and create database
for aln in AlignIO.parse(chunk, "mauve"):
#NOTE: Add error handling, return error code
cov = len(aln)
alen = aln.get_alignment_length()
if cov < params_cov or alen < params_minlen:
continue
#Add each locus to database
locus = a.consensAlign(aln, threshold=params_thresh, mask=params_mask, maf=params_maf)
lock.acquire()
locid = m.add_locus_record(connection, cov, locus.conSequence, 1, "NULL")
lock.release()
connection.close()
except Exception as e:
		raise Exception(str(e))
#Function to load LOCI file in parallel
def loadLOCI_parallel(conn, params):
"""
Format:
multiprocessing pool.
Master:
splits file into n chunks
creates multiprocessing pool
Workers:
read file chunk
calculate consensus
grab lock
INSERT data to SQL database
release lock
"""
t = int(params.threads)
numLoci = aln_file_tools.countLoci(params.loci)
if numLoci < 10000:
print("\t\t\tReading",numLoci,"alignments.")
else:
print("\t\t\tReading",numLoci,"alignments... This may take a while.")
#file chunker call
file_list = aln_file_tools.loci_chunker(params.loci, t, params.workdir)
#print("Files are:",file_list)
#Initialize multiprocessing pool
#if 'lock' not in globals():
lock = multiprocessing.Lock()
try:
with multiprocessing.Pool(t,initializer=init, initargs=(lock,)) as pool:
func = partial(loadLOCI_worker, params.db, params.cov, params.minlen, params.thresh, params.mask, params.maf)
results = pool.map(func, file_list)
except Exception as e:
pool.close()
pool.close()
pool.join()
#reset_lock()
#Remove chunkfiles
aln_file_tools.removeChunks(params.workdir)
#Function to load MAF file in parallel
def loadMAF_parallel(conn, params):
t = int(params.threads)
numLoci = aln_file_tools.countMAF(params.alignment)
if numLoci < 10000:
print("\t\t\tReading",numLoci,"alignments.")
else:
print("\t\t\tReading",numLoci,"alignments... This may take a while.")
#file chunker call
file_list = aln_file_tools.maf_chunker(params.alignment, t, params.workdir)
#print("Files are:",file_list)
#Initialize multiprocessing pool
#if 'lock' not in globals():
lock = multiprocessing.Lock()
try:
with multiprocessing.Pool(t,initializer=init, initargs=(lock,)) as pool:
func = partial(loadMAF_worker, params.db, params.cov, params.minlen, params.thresh, params.mask, params.maf)
results = pool.map(func, file_list)
except Exception as e:
pool.close()
pool.close()
pool.join()
#reset_lock()
#Remove chunkfiles
aln_file_tools.removeChunks(params.workdir)
# #first chunking, then arsing in parallel
# def loadVCF_parallel(conn, params):
# t = int(params.threads)
# #file chunker call
# file_list = vcf_tools.vcf_chunker(params.vcf, t, params.workdir)
#
# print("Files are:",file_list)
# #Initialize multiprocessing pool
# #if 'lock' not in globals():
# lock = multiprocessing.Lock()
# try:
# with multiprocessing.Pool(t,initializer=init, initargs=(lock,)) as pool:
# func = partial(loadVCF_worker, params.db, params.thresh)
# results = pool.map(func, file_list)
# except Exception as e:
# pool.close()
# pool.close()
# pool.join()
#
# #reset_lock()
# #Remove chunkfiles
# #aln_file_tools.removeChunks(params.workdir)
#Initialize a global lock. Doing it this way allows it to be inherited by the child processes properly
#Found on StackOverflow: https://stackoverflow.com/questions/25557686/python-sharing-a-lock-between-processes
#Thanks go to SO user dano
def init(l):
global lock
lock = l
#Function to reset lock
def reset_lock():
global lock
del lock
#NOTE: 'params' object can't be pickled, so I have to do it this way.
#worker function version of loadMAF
def loadMAF_worker(db, params_cov, params_minlen, params_thresh, params_mask, params_maf, chunk):
try:
connection = sqlite3.connect(db)
#Parse MAF file and create database
for aln in AlignIO.parse(chunk, "maf"):
#NOTE: Add error handling, return error code
cov = len(aln)
alen = aln.get_alignment_length()
if cov < params_cov or alen < params_minlen:
continue
#Add each locus to database
locus = a.consensAlign(aln, threshold=params_thresh, mask=params_mask, maf=params_maf)
lock.acquire()
locid = m.add_locus_record(connection, cov, locus.conSequence, 1, "NULL")
#print(locid)
lock.release()
#Extract variable positions for database
#for var in locus.alnVars:
#m.add_variant_record(connection, locid, var.position, var.value)
connection.close()
except Exception as e:
		raise Exception(str(e))
# #Function to load VCF variants file
# def loadVCF_worker(db, threshold, chunk):
# try:
# #Each worker opens unique connection to db
# connection = sqlite3.connect(db)
# #Lock DB and read loci, then release lock
# lock.acquire()
# loci = m.getPassedLoci(connection) #get DF of passed loci
# lock.release()
#
# chrom_lookup = loci.set_index('chrom')['id'].to_dict()
# loci.set_index('id', inplace=True)
#
# passed=0 #To track number of VCF records for which no locus exists
# failed=0
# for reclist in vcf_tools.read_vcf(chunk):
# rec_chrom = reclist[0].CHROM
# if rec_chrom in chrom_lookup:
# locid = chrom_lookup[rec_chrom]
# passed+=1
# #Grab DF record for the matching CHROM
# seq = loci.loc[locid,'consensus']
# #Get new consensus sequence given VCF records
# new_cons = vcf_tools.make_consensus_from_vcf(seq,rec_chrom,reclist, threshold)
# print(new_cons)
# #Update new consensus seq in db
# if len(new_cons) != len(seq): #Check length first
# print("\t\t\tWarning: New consensus sequence for locus %s (locid=<%s>) is the wrong length! Skipping."%(rec_chrom, locid))
# else:
# #Lock database for update, then relase lock
# lock.acquire()
# m.updateConsensus(connection, locid, new_cons)
# lock.release()
# else:
# failed+=1
# if failed > 0:
# print("\t\t\tWARNING:%s/%s records in <%s> don't match any reference sequences"%(failed, failed+passed, chunk))
# #close connection
# connection.close()
# except Exception as e:
# raise Exception(e.message)
#Worker function for loadLOCI_parallel
def loadLOCI_worker(db, params_cov, params_minlen, params_thresh, params_mask, params_maf, chunk):
try:
connection = sqlite3.connect(db)
#Parse LOCI file and create database
for aln in aln_file_tools.read_loci(chunk):
#NOTE: Add error handling, return error code
cov = len(aln)
alen = aln.get_alignment_length()
#Skip if coverage or alignment length too short
if cov < params_cov or alen < params_minlen:
#print("Locus skipped")
continue
else:
#Add each locus to database
locus = a.consensAlign(aln, threshold=params_thresh, mask=params_mask, maf=params_maf)
#Acquire lock, submit to Database
lock.acquire()
locid = m.add_locus_record(connection, cov, locus.conSequence, 1, "NULL")
lock.release()
#print("Loading Locus #:",locid)
#Extract variable positions for database
#for var in locus.alnVars:
#m.add_variant_record(connection, locid, var.position, var.value)
connection.close()
except Exception as e:
		raise Exception(str(e))
#Function to discover target regions using a sliding window through passedLoci
def targetDiscoverySlidingWindow_parallel(conn, params, loci):
"""
Format:
1. Write pandas DF to n chunk files
2. List of chunk file names
3. Pass 1 chunk file to each worker in a multiprocessing pool.
Master:
creates n chunk files
creates multiprocessing pool
Workers:
read file chunk
calculate consensus
grab lock
INSERT data to SQL database
release lock
"""
t = int(params.threads)
chunk = 1
loci_num = int(loci.shape[0])
#print("number of loci:",loci_num)
#print("number of threads:",t)
chunks = 0
if loci_num < t:
chunks = loci_num
else:
chunks = t
chunk_size = loci_num // chunks
remainder = loci_num % chunks
#print("Chunk size is:",chunk_size)
#print("remainder is:",remainder)
start = 0
stop = 0
files = list()
#Split loci DataFrame into chunks, and keep list of chunk files
for df_chunk in np.array_split(loci, chunks):
size = df_chunk.shape[0]
#print("size of chunk",chunk,"is:",size)
chunk_file = params.workdir + "/." + str(chunk) + ".chunk"
#print(df_chunk)
df_chunk.to_csv(chunk_file, mode="w", index=False)
files.append(chunk_file)
chunk += 1
#Initialize multiprocessing pool
#if 'lock' not in globals():
lock = multiprocessing.Lock()
with multiprocessing.Pool(t,initializer=init, initargs=(lock,)) as pool:
func = partial(targetDiscoverySlidingWindow_worker, params.db, params.win_shift, params.win_width, params.var_max, params.numN, params.numG, params.blen, params.flank_dist, params.target_all)
results = pool.map(func, files)
pool.close()
pool.join()
#reset_lock()
#Remove chunkfiles
d = os.listdir(params.workdir)
for item in d:
if item.endswith(".chunk"):
os.remove(os.path.join(params.workdir, item))
#Worker function: discover target regions using a sliding window through one chunk of passed loci
def targetDiscoverySlidingWindow_worker(db, shift, width, var, n, g, blen, flank_dist, target_all, chunk):
connection = sqlite3.connect(db)
#print("process: reading hdf from",chunk)
loci = pd.read_csv(chunk)
#print(loci)
for seq in loci.itertuples():
#print(seq)
start = 0
stop = 0
if target_all:
#print("target_all")
#submit full locus as target
seq_norm = s.simplifySeq(seq[2])
counts = s.seqCounterSimple(seq_norm)
if counts['*'] <= var and counts['N'] <= n and counts['-'] <= g:
target = seq[2]
tr_counts = s.seqCounterSimple(seq_norm)
n_mask = utils.n_lower_chars(seq[2])
n_gc = s.gc_counts(seq[2])
#NOTE: flank count set to number of variable sites in whole locus
#print(int(seq[1]), 0, len(seq[2]), seq[2], tr_counts, tr_counts, n_mask, n_gc)
lock.acquire()
m.add_region_record(connection, int(seq[1]), 0, len(seq[2]), seq[2], tr_counts, tr_counts, n_mask, n_gc)
lock.release()
else:
#print("\nConsensus: ", seq[2], "ID is: ", seq[1], "\n")
generator = s.slidingWindowGenerator(seq[2], shift, width)
for window_seq in generator():
seq_norm = s.simplifySeq(window_seq[0])
counts = s.seqCounterSimple(seq_norm)
#If window passes filters, extend current bait region
#print("Start is ", start, " and stop is ",stop) #debug print
if counts['*'] <= var and counts['N'] <= n and counts['-'] <= g:
stop = window_seq[2]
#if this window passes BUT is the last window, evaluate it
if stop == len(seq[2]):
#print("last window")
if (stop - start) >= blen:
target = (seq[2])[start:stop]
tr_counts = s.seqCounterSimple(s.simplifySeq(target))
#print("candidate:",window_seq[0])
n_mask = utils.n_lower_chars(target)
n_gc = s.gc_counts(target)
#Check that there aren't too many SNPs
#if tr_counts["*"] <= params.vmax_r:
#print(" Target region: ", target)
#Submit target region to database
#print("process: grabbing lock")'
flank_counts = s.getFlankCounts(seq[2], start, stop, flank_dist)
lock.acquire()
m.add_region_record(connection, int(seq[1]), start, stop, target, tr_counts, flank_counts, n_mask, n_gc)
#print("process: releasing lock")
lock.release()
#set start of next window to end of current TR
generator.setI(stop)
else:
#If window fails, check if previous bait region passes to submit to DB
#print (stop-start)
if (stop - start) >= blen:
target = (seq[2])[start:stop]
tr_counts = s.seqCounterSimple(s.simplifySeq(target))
n_mask = utils.n_lower_chars(target)
n_gc = s.gc_counts(target)
#Check that there aren't too many SNPs
#if tr_counts["*"] <= params.vmax_r:
#print(" Target region: ", target)
#Submit target region to database
#print("process: grabbing lock")'
flank_counts = s.getFlankCounts(seq[2], start, stop, flank_dist)
lock.acquire()
m.add_region_record(connection, int(seq[1]), start, stop, target, tr_counts, flank_counts, n_mask, n_gc)
#print("process: releasing lock")
lock.release()
#set start of next window to end of current TR
generator.setI(stop)
#If bait fails, set start to start point of next window
start = generator.getI()+shift
connection.close()
#Function to get DataFrame of targets + flank regions, and calculate some stuff
def flankDistParser_parallel(conn, dist):
#Call manage_bait_db function to return DataFrame
targets = m.getTargetFlanks(conn, dist)
| gpl-3.0 |
eco32i/ggplot | ggplot/scales/scale_colour_gradient.py | 1 | 1219 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap, rgb2hex, ColorConverter
def colors_at_breaks(cmap, breaks=[0, 0.25, 0.5, 0.75, 1.]):
return [rgb2hex(cmap(bb)[:3]) for bb in breaks]
class scale_colour_gradient(scale):
VALID_SCALES = ['name', 'limits', 'low', 'mid', 'high']
def __radd__(self, gg):
gg = deepcopy(gg)
if self.name:
gg.color_label = self.name
if self.limits:
gg.color_limits = self.limits
color_spectrum = []
if self.low:
color_spectrum.append(self.low)
if self.mid:
color_spectrum.append(self.mid)
if self.high:
color_spectrum.append(self.high)
if self.low and self.high:
gradient2n = LinearSegmentedColormap.from_list('gradient2n', color_spectrum)
plt.cm.register_cmap(cmap=gradient2n)
# add them back to ggplot
gg.color_scale = colors_at_breaks(gradient2n)
gg.colormap = gradient2n
return gg
| bsd-2-clause |
walterreade/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
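# Worked example (comment only, values checked by hand): for a flat 2x2 grid,
# _make_edges_3d(2, 2) numbers the vertices
#     0 1
#     2 3
# and returns
#     array([[0, 2, 0, 1],
#            [1, 3, 2, 3]])
# i.e. the two "right" edges (0-1, 2-3) followed by the two "down" edges
# (0-2, 1-3); the "deep" edges are empty because n_z defaults to 1.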
def _compute_gradient_3d(edges, img):
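    # `edges` holds flattened voxel indices; the division/modulo arithmetic
    # below unravels each index into its (x, y, z) coordinates so that the
    # absolute intensity difference across every edge can be read from `img`.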
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
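# Usage sketch (comment only): a 2x2 grayscale image gives a 4x4 adjacency
# matrix whose off-diagonal weights are the absolute intensity differences
# between neighbouring pixels and whose diagonal carries the pixel values.
# >>> img = np.arange(4, dtype=float).reshape(2, 2)
# >>> img_to_graph(img).shape
# (4, 4)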
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
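# Worked example (comment only): a 4x4 image with 2x2 patches has
# (4 - 2 + 1) * (4 - 2 + 1) = 9 possible patches, so
# _compute_n_patches(4, 4, 2, 2) == 9; max_patches=0.5 caps that at
# int(0.5 * 9) == 4, while an integral max_patches smaller than 9 is used
# verbatim.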
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
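# Shape sketch (comment only): for a 2-D array `arr` of shape (4, 4),
# extract_patches(arr, patch_shape=2) returns a strided view of shape
# (3, 3, 2, 2) -- patch positions on the first two axes, patch content on the
# last two -- and reshape([-1, 2, 2]) copies it into 9 explicit patches.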
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
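# Round-trip sketch (comment only): because overlapping patches are averaged,
# reconstructing from the complete set of patches recovers the original image.
# >>> img = np.arange(16, dtype=float).reshape(4, 4)
# >>> patches = extract_patches_2d(img, (2, 2))
# >>> np.allclose(reconstruct_from_patches_2d(patches, (4, 4)), img)
# True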
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
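# Usage sketch (comment only, max_patches left at None): for a batch of five
# 4x4 grayscale images,
# >>> X = np.random.rand(5, 4, 4)
# >>> PatchExtractor(patch_size=(2, 2)).transform(X).shape
# (45, 2, 2)
# i.e. 9 patches per image, stacked along the first axis.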
| bsd-3-clause |
srepho/BDA_py_demos | demos_ch2/demo2_3.py | 19 | 1931 | """Bayesian Data Analysis, 3rd ed
Chapter 2, demo 3
Simulate samples from Beta(438,544), draw a histogram with quantiles, and do
the same for a transformed variable.
"""
import numpy as np
from scipy.stats import beta
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# Plotting grid
x = np.linspace(0.36, 0.54, 150)
# Draw n random samples from Beta(438,544)
n = 10000
th = beta.rvs(438, 544, size=n) # rvs comes from `random variates`
# Plot 2 subplots
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8, 10))
# Plot histogram
axes[0].hist(th, bins=30)
# Compute 2.5% and 97.5% quantile approximation using samples
th25, th975 = np.percentile(th, [2.5, 97.5])
# Draw lines for these
axes[0].axvline(th25, color='#e41a1c', linewidth=1.5)
axes[0].axvline(th975, color='#e41a1c', linewidth=1.5)
axes[0].text(th25, axes[0].get_ylim()[1]+15, '2.5%',
horizontalalignment='center')
axes[0].text(th975, axes[0].get_ylim()[1]+15, '97.5%',
horizontalalignment='center')
axes[0].set_xlabel(r'$\theta$', fontsize=18)
axes[0].set_yticks(())
# Plot histogram for the transformed variable
phi = (1-th)/th
axes[1].hist(phi, bins=30)
# Compute 2.5% and 97.5% quantile approximation using samples
phi25, phi975 = np.percentile(phi, [2.5, 97.5])
# Draw lines for these
axes[1].axvline(phi25, color='#e41a1c', linewidth=1.5)
axes[1].axvline(phi975, color='#e41a1c', linewidth=1.5)
axes[1].text(phi25, axes[1].get_ylim()[1]+15, '2.5%',
horizontalalignment='center')
axes[1].text(phi975, axes[1].get_ylim()[1]+15, '97.5%',
horizontalalignment='center')
axes[1].set_xlabel(r'$\phi$', fontsize=18)
axes[1].set_yticks(())
# Display the figure
plt.show()
| gpl-3.0 |
krafczyk/spack | var/spack/repos/builtin/packages/py-multiqc/package.py | 5 | 2328 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyMultiqc(PythonPackage):
"""MultiQC is a tool to aggregate bioinformatics results across many
samples into a single report. It is written in Python and contains modules
for a large number of common bioinformatics tools."""
homepage = "https://multiqc.info"
url = "https://pypi.io/packages/source/m/multiqc/multiqc-1.0.tar.gz"
version('1.5', 'c9fc5f54a75b1d0c3e119e0db7f5fe72')
version('1.3', '78fef8a89c0bd40d559b10c1f736bbcd')
version('1.0', '0b7310b3f75595e5be8099fbed2d2515')
depends_on('[email protected]:')
depends_on('py-setuptools', type='build')
depends_on('py-click', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-lzstring', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-spectra', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-pyyaml', type=('build', 'run'))
depends_on('py-simplejson', type=('build', 'run'))
| lgpl-2.1 |
pratapvardhan/pandas | pandas/tests/frame/test_operators.py | 2 | 43613 | # -*- coding: utf-8 -*-
from __future__ import print_function
from collections import deque
from datetime import datetime
from decimal import Decimal
import operator
import pytest
from numpy import nan, random
import numpy as np
from pandas.compat import range
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.core.common as com
import pandas.io.formats.printing as printing
import pandas as pd
from pandas.util.testing import (assert_numpy_array_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import (TestData, _check_mixed_float,
_check_mixed_int)
class TestDataFrameOperators(TestData):
def test_operators(self):
garbage = random.random(4)
colSeries = Series(garbage, index=np.array(self.frame.columns))
idSum = self.frame + self.frame
seriesSum = self.frame + colSeries
for col, series in compat.iteritems(idSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] * 2
if not np.isnan(val):
assert val == origVal
else:
assert np.isnan(origVal)
for col, series in compat.iteritems(seriesSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] + colSeries[col]
if not np.isnan(val):
assert val == origVal
else:
assert np.isnan(origVal)
added = self.frame2 + self.frame2
expected = self.frame2 * 2
assert_frame_equal(added, expected)
df = DataFrame({'a': ['a', None, 'b']})
assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))
# Test for issue #10181
for dtype in ('float', 'int64'):
frames = [
DataFrame(dtype=dtype),
DataFrame(columns=['A'], dtype=dtype),
DataFrame(index=[0], dtype=dtype),
]
for df in frames:
assert (df + df).equals(df)
assert_frame_equal(df + df, df)
def test_ops_np_scalar(self):
vals, xs = np.random.rand(5, 3), [nan, 7, -23, 2.718, -3.14, np.inf]
f = lambda x: DataFrame(x, index=list('ABCDE'),
columns=['jim', 'joe', 'jolie'])
df = f(vals)
for x in xs:
assert_frame_equal(df / np.array(x), f(vals / x))
assert_frame_equal(np.array(x) * df, f(vals * x))
assert_frame_equal(df + np.array(x), f(vals + x))
assert_frame_equal(np.array(x) - df, f(x - vals))
def test_operators_boolean(self):
# GH 5808
# empty frames, non-mixed dtype
result = DataFrame(index=[1]) & DataFrame(index=[1])
assert_frame_equal(result, DataFrame(index=[1]))
result = DataFrame(index=[1]) | DataFrame(index=[1])
assert_frame_equal(result, DataFrame(index=[1]))
result = DataFrame(index=[1]) & DataFrame(index=[1, 2])
assert_frame_equal(result, DataFrame(index=[1, 2]))
result = DataFrame(index=[1], columns=['A']) & DataFrame(
index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(index=[1], columns=['A']))
result = DataFrame(True, index=[1], columns=['A']) & DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
result = DataFrame(True, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
# boolean ops
result = DataFrame(1, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(1, index=[1], columns=['A']))
def f():
DataFrame(1.0, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
pytest.raises(TypeError, f)
def f():
DataFrame('foo', index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
pytest.raises(TypeError, f)
def test_operators_none_as_na(self):
df = DataFrame({"col1": [2, 5.0, 123, None],
"col2": [1, 2, 3, 4]}, dtype=object)
ops = [operator.add, operator.sub, operator.mul, operator.truediv]
# since filling converts dtypes from object, changed expected to be
# object
for op in ops:
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
assert_frame_equal(result, expected, check_dtype=False)
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
pytest.raises(TypeError, lambda: x == y)
pytest.raises(TypeError, lambda: x != y)
pytest.raises(TypeError, lambda: x >= y)
pytest.raises(TypeError, lambda: x > y)
pytest.raises(TypeError, lambda: x < y)
pytest.raises(TypeError, lambda: x <= y)
# GH4968
# invalid date/int comparisons
df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
df['dates'] = date_range('20010101', periods=len(df))
df2 = df.copy()
df2['dates'] = df['a']
check(df, df2)
df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
df2 = DataFrame({'a': date_range('20010101', periods=len(
df)), 'b': date_range('20100101', periods=len(df))})
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
df = DataFrame({'dates1': date_range('20010101', periods=10),
'dates2': date_range('20010102', periods=10),
'intcol': np.random.randint(1000000000, size=10),
'floatcol': np.random.randn(10),
'stringcol': list(tm.rands(10))})
df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(df, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), df)
assert_frame_equal(result, expected)
# nats
expected = left_f(df, Timestamp('nat'))
result = right_f(Timestamp('nat'), df)
assert_frame_equal(result, expected)
def test_logical_operators(self):
def _check_bin_op(op):
result = op(df1, df2)
expected = DataFrame(op(df1.values, df2.values), index=df1.index,
columns=df1.columns)
assert result.values.dtype == np.bool_
assert_frame_equal(result, expected)
def _check_unary_op(op):
result = op(df1)
expected = DataFrame(op(df1.values), index=df1.index,
columns=df1.columns)
assert result.values.dtype == np.bool_
assert_frame_equal(result, expected)
df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': False, 'b': False, 'c': True,
'd': False, 'e': False},
'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}
df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'd': {'a': False, 'b': False, 'c': False,
'd': True, 'e': False},
'e': {'a': False, 'b': False, 'c': False,
'd': False, 'e': True}}
df1 = DataFrame(df1)
df2 = DataFrame(df2)
_check_bin_op(operator.and_)
_check_bin_op(operator.or_)
_check_bin_op(operator.xor)
# operator.neg is deprecated in numpy >= 1.9
_check_unary_op(operator.inv)
@pytest.mark.parametrize('op,res', [('__eq__', False),
('__ne__', True)])
def test_logical_typeerror_with_non_valid(self, op, res):
# we are comparing floats vs a string
result = getattr(self.frame, op)('foo')
assert bool(result.all().all()) is res
def test_logical_with_nas(self):
d = DataFrame({'a': [np.nan, False], 'b': [True, True]})
# GH4947
# bool comparisons should return bool
result = d['a'] | d['b']
expected = Series([False, True])
assert_series_equal(result, expected)
# GH4604, automatic casting here
result = d['a'].fillna(False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
result = d['a'].fillna(False, downcast=False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
@pytest.mark.parametrize('df,expected', [
(pd.DataFrame({'a': [-1, 1]}), pd.DataFrame({'a': [1, -1]})),
(pd.DataFrame({'a': [False, True]}),
pd.DataFrame({'a': [True, False]})),
(pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
pd.DataFrame({'a': pd.Series(pd.to_timedelta([1, -1]))}))
])
def test_neg_numeric(self, df, expected):
assert_frame_equal(-df, expected)
assert_series_equal(-df['a'], expected['a'])
@pytest.mark.parametrize('df, expected', [
(np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)),
([Decimal('1.0'), Decimal('2.0')], [Decimal('-1.0'), Decimal('-2.0')]),
])
def test_neg_object(self, df, expected):
# GH 21380
df = pd.DataFrame({'a': df})
expected = pd.DataFrame({'a': expected})
assert_frame_equal(-df, expected)
assert_series_equal(-df['a'], expected['a'])
@pytest.mark.parametrize('df', [
pd.DataFrame({'a': ['a', 'b']}),
pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
])
def test_neg_raises(self, df):
with pytest.raises(TypeError):
(- df)
with pytest.raises(TypeError):
(- df['a'])
def test_invert(self):
assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))
@pytest.mark.parametrize('df', [
pd.DataFrame({'a': [-1, 1]}),
pd.DataFrame({'a': [False, True]}),
pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
])
def test_pos_numeric(self, df):
# GH 16073
assert_frame_equal(+df, df)
assert_series_equal(+df['a'], df['a'])
@pytest.mark.parametrize('df', [
pd.DataFrame({'a': ['a', 'b']}),
pd.DataFrame({'a': np.array([-1, 2], dtype=object)}),
pd.DataFrame({'a': [Decimal('-1.0'), Decimal('2.0')]}),
])
def test_pos_object(self, df):
# GH 21380
assert_frame_equal(+df, df)
assert_series_equal(+df['a'], df['a'])
@pytest.mark.parametrize('df', [
pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
])
def test_pos_raises(self, df):
with pytest.raises(TypeError):
(+ df)
with pytest.raises(TypeError):
(+ df['a'])
def test_arith_flex_frame(self):
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
for op in ops:
try:
alias = aliases.get(op, op)
f = getattr(operator, alias)
result = getattr(self.frame, op)(2 * self.frame)
exp = f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype=dict(C=None))
# vs mix int
if op in ['add', 'sub', 'mul']:
result = getattr(self.mixed_int, op)(2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# no overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B='uint64', C=None)
elif op in ['add', 'mul']:
dtype = dict(C=None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype=dtype)
# rops
r_f = lambda x, y: f(y, x)
result = getattr(self.frame, 'r' + op)(2 * self.frame)
exp = r_f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(
2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype=dict(C=None))
result = getattr(self.intframe, op)(2 * self.intframe)
exp = f(self.intframe, 2 * self.intframe)
assert_frame_equal(result, exp)
# vs mix int
if op in ['add', 'sub', 'mul']:
result = getattr(self.mixed_int, op)(
2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# no overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B='uint64', C=None)
elif op in ['add', 'mul']:
dtype = dict(C=None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype=dtype)
except:
printing.pprint_thing("Failing operation %r" % op)
raise
# ndim >= 3
ndim_5 = np.ones(self.frame.shape + (3, 4, 5))
msg = "Unable to coerce to Series/DataFrame"
with tm.assert_raises_regex(ValueError, msg):
f(self.frame, ndim_5)
with tm.assert_raises_regex(ValueError, msg):
getattr(self.frame, op)(ndim_5)
# res_add = self.frame.add(self.frame)
# res_sub = self.frame.sub(self.frame)
# res_mul = self.frame.mul(self.frame)
# res_div = self.frame.div(2 * self.frame)
# assert_frame_equal(res_add, self.frame + self.frame)
# assert_frame_equal(res_sub, self.frame - self.frame)
# assert_frame_equal(res_mul, self.frame * self.frame)
# assert_frame_equal(res_div, self.frame / (2 * self.frame))
const_add = self.frame.add(1)
assert_frame_equal(const_add, self.frame + 1)
# corner cases
result = self.frame.add(self.frame[:0])
assert_frame_equal(result, self.frame * np.nan)
result = self.frame[:0].add(self.frame)
assert_frame_equal(result, self.frame * np.nan)
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.iloc[0], fill_value=3)
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.iloc[0], axis='index', fill_value=3)
def test_arith_flex_zero_len_raises(self):
# GH#19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([])
df_len0 = pd.DataFrame([], columns=['A', 'B'])
df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
df.add(ser_len0, fill_value='E')
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
df_len0.sub(df['A'], axis=None, fill_value=3)
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product([list('abc'),
['one', 'two', 'three'],
[1, 2, 3]],
names=['first', 'second', 'third'])
df = DataFrame(np.arange(27 * 3).reshape(27, 3),
index=index,
columns=['value1', 'value2', 'value3']).sort_index()
idx = pd.IndexSlice
for op in ['add', 'sub', 'mul', 'div', 'truediv']:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level='third', axis=0)
expected = pd.concat([opa(df.loc[idx[:, :, i], :], v)
for i, v in x.iteritems()]).sort_index()
assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ['two', 'three'])
result = getattr(df, op)(x, level='second', axis=0)
expected = (pd.concat([opa(df.loc[idx[:, i], :], v)
for i, v in x.iteritems()])
.reindex_like(df).sort_index())
assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([['A', 'B'], ['a', 'b']])
df = DataFrame(np.ones((2, 4), dtype='int64'), columns=midx)
s = pd.Series({'a': 1, 'b': 2})
df2 = df.copy()
df2.columns.names = ['lvl0', 'lvl1']
s2 = s.copy()
s2.index.name = 'lvl1'
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level='lvl1')
res6 = df2.mul(s2, axis=1, level='lvl1')
exp = DataFrame(np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype='int64'),
columns=midx)
for res in [res1, res2]:
assert_frame_equal(res, exp)
exp.columns.names = ['lvl0', 'lvl1']
for res in [res3, res4, res5, res6]:
assert_frame_equal(res, exp)
def test_arith_mixed(self):
left = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 2, 3]})
result = left + left
expected = DataFrame({'A': ['aa', 'bb', 'cc'],
'B': [2, 4, 6]})
assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
self._test_op(df, operator.add)
self._test_op(df, operator.sub)
self._test_op(df, operator.mul)
self._test_op(df, operator.truediv)
self._test_op(df, operator.floordiv)
self._test_op(df, operator.pow)
self._test_op(df, lambda x, y: y + x)
self._test_op(df, lambda x, y: y - x)
self._test_op(df, lambda x, y: y * x)
self._test_op(df, lambda x, y: y / x)
self._test_op(df, lambda x, y: y ** x)
self._test_op(df, lambda x, y: x + y)
self._test_op(df, lambda x, y: x - y)
self._test_op(df, lambda x, y: x * y)
self._test_op(df, lambda x, y: x / y)
self._test_op(df, lambda x, y: x ** y)
@staticmethod
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
assert_series_equal(result[col], op(df[col], 1))
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = DataFrame(data)
other = DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
assert_frame_equal(f(other.values), o(df, other.values))
# scalar
assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
assert_frame_equal(f(np.nan), o(df, np.nan))
with tm.assert_raises_regex(ValueError, msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
assert_frame_equal(col_eq, df == Series(col_ser))
assert_frame_equal(col_eq, -col_ne)
assert_frame_equal(idx_eq, -idx_ne)
assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
assert_frame_equal(col_eq, df.eq(list(col_ser)))
assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
assert_frame_equal(col_gt, df > Series(col_ser))
assert_frame_equal(col_gt, -col_le)
assert_frame_equal(idx_gt, -idx_le)
assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
assert_frame_equal(col_ge, df >= Series(col_ser))
assert_frame_equal(col_ge, -col_lt)
assert_frame_equal(idx_ge, -idx_lt)
assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = Series(np.random.randn(5))
col_ser = Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = DataFrame({'a': arr})
df2 = DataFrame({'a': arr2})
rs = df.gt(df2)
assert not rs.values.any()
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = DataFrame({'a': arr3})
rs = df3.gt(2j)
assert not rs.values.any()
# corner, dtype=object
df1 = DataFrame({'col': ['foo', np.nan, 'bar']})
df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})
result = df1.ne(df2)
exp = DataFrame({'col': [False, True, False]})
assert_frame_equal(result, exp)
def test_dti_tz_convert_to_utc(self):
base = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz='UTC')
idx1 = base.tz_convert('Asia/Tokyo')[:2]
idx2 = base.tz_convert('US/Eastern')[1:]
df1 = DataFrame({'A': [1, 2]}, index=idx1)
df2 = DataFrame({'A': [1, 1]}, index=idx2)
exp = DataFrame({'A': [np.nan, 3, np.nan]}, index=base)
assert_frame_equal(df1 + df2, exp)
def test_arith_flex_series(self):
df = self.simple
row = df.xs('a')
col = df['two']
# after arithmetic refactor, add truediv here
ops = ['add', 'sub', 'mul', 'mod']
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
assert_frame_equal(f(row), op(df, row))
assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
assert_frame_equal(df.div(row), df / row)
assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH7325
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='int64')
expected = DataFrame([[nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
assert_frame_equal(result, expected)
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='float64')
expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = self.simple
val1 = df.xs('a').values
added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
assert_frame_equal(df + val1, added)
added = DataFrame((df.values.T + val1).T,
index=df.index, columns=df.columns)
assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df['two'])
added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
assert_frame_equal(df + val2, added)
added = DataFrame((df.values.T + val2).T, index=df.index,
columns=df.columns)
assert_frame_equal(df.add(val2, axis='index'), added)
val3 = np.random.rand(*df.shape)
added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
assert_frame_equal(df.add(val3), added)
@pytest.mark.parametrize('values', [[1, 2], (1, 2), np.array([1, 2]),
range(1, 3), deque([1, 2])])
def test_arith_alignment_non_pandas_object(self, values):
# GH 17901
df = DataFrame({'A': [1, 1], 'B': [1, 1]})
expected = DataFrame({'A': [2, 2], 'B': [3, 3]})
result = df + values
assert_frame_equal(result, expected)
def test_combineFrame(self):
frame_copy = self.frame.reindex(self.frame.index[::2])
del frame_copy['D']
frame_copy['C'][:5] = nan
added = self.frame + frame_copy
indexer = added['A'].dropna().index
exp = (self.frame['A'] * 2).copy()
tm.assert_series_equal(added['A'].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added['A'], exp.loc[added['A'].index])
assert np.isnan(added['C'].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added['D']).all()
self_added = self.frame + self.frame
tm.assert_index_equal(self_added.index, self.frame.index)
added_rev = frame_copy + self.frame
assert np.isnan(added['D']).all()
assert np.isnan(added_rev['D']).all()
# corner cases
# empty
plus_empty = self.frame + self.empty
assert np.isnan(plus_empty.values).all()
empty_plus = self.empty + self.frame
assert np.isnan(empty_plus.values).all()
empty_empty = self.empty + self.empty
assert empty_empty.empty
# out of order
reverse = self.frame.reindex(columns=self.frame.columns[::-1])
assert_frame_equal(reverse + self.frame, self.frame * 2)
# mix vs float64, upcast
added = self.frame + self.mixed_float
_check_mixed_float(added, dtype='float64')
added = self.mixed_float + self.frame
_check_mixed_float(added, dtype='float64')
# mix vs mix
added = self.mixed_float + self.mixed_float2
_check_mixed_float(added, dtype=dict(C=None))
added = self.mixed_float2 + self.mixed_float
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = self.frame + self.mixed_int
_check_mixed_float(added, dtype='float64')
def test_combineSeries(self):
# Series
series = self.frame.xs(self.frame.index[0])
added = self.frame + series
for key, s in compat.iteritems(added):
assert_series_equal(s, self.frame[key] + series[key])
larger_series = series.to_dict()
larger_series['E'] = 1
larger_series = Series(larger_series)
larger_added = self.frame + larger_series
for key, s in compat.iteritems(self.frame):
assert_series_equal(larger_added[key], s + series[key])
assert 'E' in larger_added
assert np.isnan(larger_added['E']).all()
# no upcast needed
added = self.mixed_float + series
_check_mixed_float(added)
# vs mix (upcast) as needed
added = self.mixed_float + series.astype('float32')
_check_mixed_float(added, dtype=dict(C=None))
added = self.mixed_float + series.astype('float16')
_check_mixed_float(added, dtype=dict(C=None))
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
# added = self.mixed_int + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = self.mixed_int + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = self.tsframe['A']
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
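        # e.g. `self.tsframe + ts` would align the Series against the frame's
        # columns (yielding NaNs) rather than broadcasting along the rows, so the
        # row-wise intent has to be spelled out with add(..., axis='index'):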
added = self.tsframe.add(ts, axis='index')
for key, col in compat.iteritems(self.tsframe):
result = col + ts
assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == 'A'
else:
assert result.name is None
smaller_frame = self.tsframe[:-5]
smaller_added = smaller_frame.add(ts, axis='index')
tm.assert_index_equal(smaller_added.index, self.tsframe.index)
smaller_ts = ts[:-5]
smaller_added2 = self.tsframe.add(smaller_ts, axis='index')
assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = self.tsframe.add(ts[:0], axis='index')
expected = DataFrame(np.nan, index=self.tsframe.index,
columns=self.tsframe.columns)
assert_frame_equal(result, expected)
# Frame is all-nan
result = self.tsframe[:0].add(ts, axis='index')
expected = DataFrame(np.nan, index=self.tsframe.index,
columns=self.tsframe.columns)
assert_frame_equal(result, expected)
# empty but with non-empty index
frame = self.tsframe[:1].reindex(columns=[])
result = frame.mul(ts, axis='index')
assert len(result) == len(ts)
def test_combineFunc(self):
result = self.frame * 2
tm.assert_numpy_array_equal(result.values, self.frame.values * 2)
# vs mix
result = self.mixed_float * 2
for c, s in compat.iteritems(result):
tm.assert_numpy_array_equal(
s.values, self.mixed_float[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = self.empty * 2
assert result.index is self.empty.index
assert len(result.columns) == 0
def test_comparisons(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = self.simple.xs('a')
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values,
func(df1.values, df2.values))
with tm.assert_raises_regex(ValueError,
'Wrong number of dimensions'):
func(df1, ndim_5)
result2 = func(self.simple, row)
tm.assert_numpy_array_equal(result2.values,
func(self.simple.values, row.values))
result3 = func(self.frame, 0)
tm.assert_numpy_array_equal(result3.values,
func(self.frame.values, 0))
with tm.assert_raises_regex(ValueError,
'Can only compare identically'
'-labeled DataFrame'):
func(self.simple, self.simple[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]['A'] = np.nan
with np.errstate(invalid='ignore'):
expected = missing_df.values < 0
with np.errstate(invalid='raise'):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
l = (2, 2, 2)
tup = tuple(l)
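        # worked example: df is [[0, 1], [2, 3], [4, 5]], so comparing each row
        # against (2, 2) produces the boolean frames constructed explicitly below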
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
assert_frame_equal(result, expected)
result = df.values > b
assert_numpy_array_equal(result, expected.values)
result = df > l
assert_frame_equal(result, expected)
result = df > tup
assert_frame_equal(result, expected)
result = df > b_r
assert_frame_equal(result, expected)
result = df.values > b_r
assert_numpy_array_equal(result, expected.values)
pytest.raises(ValueError, df.__gt__, b_c)
pytest.raises(ValueError, df.values.__gt__, b_c)
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
assert_frame_equal(result, expected)
result = df == l
assert_frame_equal(result, expected)
result = df == tup
assert_frame_equal(result, expected)
result = df == b_r
assert_frame_equal(result, expected)
result = df.values == b_r
assert_numpy_array_equal(result, expected.values)
pytest.raises(ValueError, lambda: df == b_c)
assert df.values.shape != b_c.shape
# with alignment
df = DataFrame(np.arange(6).reshape((3, 2)),
columns=list('AB'), index=list('abc'))
expected.index = df.index
expected.columns = df.columns
result = df == l
assert_frame_equal(result, expected)
result = df == tup
assert_frame_equal(result, expected)
def test_combine_generic(self):
df1 = self.frame
df2 = self.frame.loc[self.frame.index[:-5], ['A', 'B', 'C']]
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
assert combined['D'].isna().all()
assert combined2['D'].isna().all()
chunk = combined.loc[combined.index[:-5], ['A', 'B', 'C']]
chunk2 = combined2.loc[combined2.index[:-5], ['A', 'B', 'C']]
exp = self.frame.loc[self.frame.index[:-5],
['A', 'B', 'C']].reindex_like(chunk) * 2
assert_frame_equal(chunk, exp)
assert_frame_equal(chunk2, exp)
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list('abcdefg')
X_orig = DataFrame(np.arange(10 * len(columns))
.reshape(-1, len(columns)),
columns=columns, index=range(10))
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list('bedcf')
subs = list('bcdef')
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
assert_series_equal(s, s2)
assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._data is s2._data
df = df_orig.copy()
df2 = df
df += 1
assert_frame_equal(df, df2)
assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._data is df2._data
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
assert_series_equal(s, s2)
assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
assert_frame_equal(df, df2)
assert_frame_equal(df_orig + 1.5, df)
assert df is df2
assert df._data is df2._data
# mixed dtype
arr = np.random.randint(0, 10, size=5)
df_orig = DataFrame({'A': arr.copy(), 'B': 'foo'})
df = df_orig.copy()
df2 = df
df['A'] += 1
expected = DataFrame({'A': arr.copy() + 1, 'B': 'foo'})
assert_frame_equal(df, expected)
assert_frame_equal(df2, expected)
assert df._data is df2._data
df = df_orig.copy()
df2 = df
df['A'] += 1.5
expected = DataFrame({'A': arr.copy() + 1.5, 'B': 'foo'})
assert_frame_equal(df, expected)
assert_frame_equal(df2, expected)
assert df._data is df2._data
@pytest.mark.parametrize('op', ['add', 'and', 'div', 'floordiv', 'mod',
'mul', 'or', 'pow', 'sub', 'truediv',
'xor'])
def test_inplace_ops_identity2(self, op):
if compat.PY3 and op == 'div':
return
df = DataFrame({'a': [1., 2., 3.],
'b': [1, 2, 3]})
operand = 2
if op in ('and', 'or', 'xor'):
# cannot use floats for boolean ops
df['a'] = [True, False, True]
df_copy = df.copy()
iop = '__i{}__'.format(op)
op = '__{}__'.format(op)
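        # e.g. op='add' yields iop='__iadd__' and op='__add__', so the in-place
        # result can be checked against the ordinary binary operator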
# no id change and value is correct
getattr(df, iop)(operand)
expected = getattr(df_copy, op)(operand)
assert_frame_equal(df, expected)
expected = id(df)
assert id(df) == expected
def test_alignment_non_pandas(self):
index = ['A', 'B', 'C']
columns = ['X', 'Y', 'Z']
df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
align = pd.core.ops._align_method_FRAME
for val in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype=np.int64),
range(1, 4)]:
tm.assert_series_equal(align(df, val, 'index'),
Series([1, 2, 3], index=df.index))
tm.assert_series_equal(align(df, val, 'columns'),
Series([1, 2, 3], index=df.columns))
# length mismatch
msg = 'Unable to coerce to Series, length must be 3: given 2'
for val in [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]:
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'index')
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'columns')
val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(align(df, val, 'index'),
DataFrame(val, index=df.index,
columns=df.columns))
tm.assert_frame_equal(align(df, val, 'columns'),
DataFrame(val, index=df.index,
columns=df.columns))
# shape mismatch
msg = 'Unable to coerce to DataFrame, shape must be'
val = np.array([[1, 2, 3], [4, 5, 6]])
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'index')
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'columns')
val = np.zeros((3, 3, 3))
with pytest.raises(ValueError):
align(df, val, 'index')
with pytest.raises(ValueError):
align(df, val, 'columns')
| bsd-3-clause |
lbdreyer/cartopy | lib/cartopy/tests/mpl/test_img_transform.py | 1 | 5341 | # (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
import operator
import os
import unittest
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from cartopy import config
from cartopy.tests.mpl import ImageTesting
import cartopy.crs as ccrs
import cartopy.img_transform as im_trans
from functools import reduce
class TestRegrid(unittest.TestCase):
def test_array_dims(self):
# Source data
source_nx = 100
source_ny = 100
source_x = np.linspace(-180.0,
180.0,
source_nx).astype(np.float64)
source_y = np.linspace(-90, 90.0, source_ny).astype(np.float64)
source_x, source_y = np.meshgrid(source_x, source_y)
data = np.arange(source_nx * source_ny,
dtype=np.int32).reshape(source_ny, source_nx)
source_cs = ccrs.Geodetic()
# Target grid
target_nx = 23
target_ny = 45
target_proj = ccrs.PlateCarree()
target_x, target_y, extent = im_trans.mesh_projection(target_proj,
target_nx,
target_ny)
# Perform regrid
new_array = im_trans.regrid(data, source_x, source_y, source_cs,
target_proj, target_x, target_y)
# Check dimensions of return array
self.assertEqual(new_array.shape, target_x.shape)
self.assertEqual(new_array.shape, target_y.shape)
self.assertEqual(new_array.shape, (target_ny, target_nx))
def test_different_dims(self):
# Source data
source_nx = 100
source_ny = 100
source_x = np.linspace(-180.0, 180.0,
source_nx).astype(np.float64)
source_y = np.linspace(-90, 90.0,
source_ny).astype(np.float64)
source_x, source_y = np.meshgrid(source_x, source_y)
data = np.arange(source_nx * source_ny,
dtype=np.int32).reshape(source_ny, source_nx)
source_cs = ccrs.Geodetic()
# Target grids (different shapes)
target_x_shape = (23, 45)
target_y_shape = (23, 44)
target_x = np.arange(reduce(operator.mul, target_x_shape),
dtype=np.float64).reshape(target_x_shape)
target_y = np.arange(reduce(operator.mul, target_y_shape),
dtype=np.float64).reshape(target_y_shape)
target_proj = ccrs.PlateCarree()
# Attempt regrid
with self.assertRaises(ValueError):
im_trans.regrid(data, source_x, source_y, source_cs,
target_proj, target_x, target_y)
@ImageTesting(['regrid_image'])
def test_regrid_image():
# Source data
fname = os.path.join(config["repo_data_dir"], 'raster', 'natural_earth',
'50-natural-earth-1-downsampled.png')
nx = 720
ny = 360
source_proj = ccrs.PlateCarree()
source_x, source_y, _ = im_trans.mesh_projection(source_proj, nx, ny)
data = plt.imread(fname)
# Flip vertically to match source_x/source_y orientation
data = data[::-1]
# Target grid
target_nx = 300
target_ny = 300
target_proj = ccrs.InterruptedGoodeHomolosine()
target_x, target_y, target_extent = im_trans.mesh_projection(target_proj,
target_nx,
target_ny)
# Perform regrid
new_array = im_trans.regrid(data, source_x, source_y, source_proj,
target_proj, target_x, target_y)
# Plot
fig = plt.figure(figsize=(10, 10))
gs = matplotlib.gridspec.GridSpec(nrows=4, ncols=1,
hspace=1.5, wspace=0.5)
# Set up axes and title
ax = plt.subplot(gs[0], frameon=False, projection=target_proj)
plt.imshow(new_array, origin='lower', extent=target_extent)
ax.coastlines()
# Plot each color slice (tests masking)
cmaps = {'red': 'Reds', 'green': 'Greens', 'blue': 'Blues'}
for i, color in enumerate(['red', 'green', 'blue']):
ax = plt.subplot(gs[i + 1], frameon=False, projection=target_proj)
ax.set_title(color)
plt.imshow(new_array[:, :, i], extent=target_extent, origin='lower',
cmap=cmaps[color])
ax.coastlines()
# Tighten up layout
gs.tight_layout(plt.gcf())
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| lgpl-3.0 |
BorisJeremic/Real-ESSI-Examples | education_examples/_Chapter_Modeling_and_Simulation_Examples_Dynamic_Examples/upU/coupled_contact_upU_parallel/plot.py | 3 | 3443 |
###########################################################################################################################
# #
# Wet Contact Modelling in Real ESSI #
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# #
# GITHUB:: https://github.com/SumeetSinha #
# #
# Sumeet Kumar Sinha (September,2016) #
# Computational Geomechanics Group #
# University of California, Davis #
# s u m e e t k s i n h a . c o m #
###########################################################################################################################
from __future__ import print_function
import matplotlib.pylab as plt
import numpy as np
import h5py
f = h5py.File('CoupledContactEffect_Consolidation.h5.feioutput','r')
z_index = 30;
pressure = f["/Model/Nodes/Generalized_Displacements"][3,:]
zdisp_at_interface = f["/Model/Nodes/Generalized_Displacements"][z_index,:]
# zdisp_at_top = f["/Model/Nodes/Generalized_Displacements"][79,:]
time = f["/time"][:]
# Plot the pressure figure. Add labels and titles.
plt.figure()
plt.plot(time,pressure/1000,'-',linewidth=2.0,)
plt.grid()
plt.xlabel("Time [s]")
plt.ylabel("Pressure [kPa] ")
plt.savefig("Pressure_Plot.pdf", bbox_inches='tight')
plt.show()
# Plot the displacementn figure. Add labels and titles.
plt.figure()
plt.plot(time,1000*zdisp_at_interface,label="At Surface",linewidth=2.0,)
# plt.plot(time,1000*zdisp_at_top,label="At Interface",linewidth=2.0,)
plt.grid()
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Displacement [mm] ")
plt.savefig("Displacement_Plot.pdf", bbox_inches='tight')
plt.show()
f = h5py.File('Saturated_Contact_Modelling_Consolidation.h5.feioutput','r')
pressure = f["/Model/Nodes/Generalized_Displacements"][3,:]
zdisp_at_interface = f["/Model/Nodes/Generalized_Displacements"][z_index,:]
# zdisp_at_top = f["/Model/Nodes/Generalized_Displacements"][79,:]
time = f["/time"][:]
# Plot the pressure figure. Add labels and titles.
plt.figure()
plt.plot(time,pressure/1000,'-',linewidth=2.0,)
plt.grid()
plt.xlabel("Time [s]")
plt.ylabel("Pressure [kPa] ")
plt.savefig("Pressure_Plot_2.pdf", bbox_inches='tight')
plt.show()
# Plot the displacementn figure. Add labels and titles.
plt.figure()
plt.plot(time,1000*zdisp_at_interface,label="At Surface",linewidth=2.0,)
# plt.plot(time,1000*zdisp_at_top,label="At Interface",linewidth=2.0,)
plt.grid()
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Displacement [mm] ")
plt.savefig("Displacement_Plot_2.pdf", bbox_inches='tight')
plt.show() | cc0-1.0 |
qrqiuren/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/stochasticSynthesisFrame.py | 24 | 2966 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
def stochasticModelFrame(x, w, N, stocf) :
# x: input array sound, w: analysis window, N: FFT size,
# stocf: decimation factor of mag spectrum for stochastic analysis
hN = N/2+1 # size of positive spectrum
hM = (w.size)/2 # half analysis window size
pin = hM # initialize sound pointer in middle of analysis window
fftbuffer = np.zeros(N) # initialize buffer for FFT
yw = np.zeros(w.size) # initialize output sound frame
w = w / sum(w) # normalize analysis window
#-----analysis-----
xw = x[pin-hM:pin+hM] * w # window the input sound
X = fft(xw) # compute FFT
mX = 20 * np.log10( abs(X[:hN]) ) # magnitude spectrum of positive frequencies
mXenv = resample(np.maximum(-200, mX), mX.size*stocf) # decimate the mag spectrum
pX = np.angle(X[:hN])
#-----synthesis-----
mY = resample(mXenv, hN) # interpolate to original size
pY = 2*np.pi*np.random.rand(hN) # generate phase random values
Y = np.zeros(N, dtype = complex)
Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.
Y[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq.
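	# the negative-frequency bins mirror the positive ones with negated phase,
	# making the spectrum (approximately) Hermitian so the inverse FFT below is essentially real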
fftbuffer = np.real( ifft(Y) ) # inverse FFT
y = fftbuffer*N/2
return mX, pX, mY, pY, y
# example call of stochasticModel function
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
w = np.hanning(1024)
N = 1024
stocf = 0.1
maxFreq = 10000.0
lastbin = N*maxFreq/fs
first = 1000
last = first+w.size
mX, pX, mY, pY, y = stochasticModelFrame(x[first:last], w, N, stocf)
plt.figure(1, figsize=(9, 5))
plt.subplot(3,1,1)
plt.plot(float(fs)*np.arange(mY.size)/N, mY, 'r', lw=1.5, label="mY")
plt.axis([0, maxFreq, -78, max(mX)+0.5])
plt.title('mY (stochastic approximation of mX)')
plt.subplot(3,1,2)
plt.plot(float(fs)*np.arange(pY.size)/N, pY-np.pi, 'c', lw=1.5, label="pY")
plt.axis([0, maxFreq, -np.pi, np.pi])
plt.title('pY (random phases)')
plt.subplot(3,1,3)
plt.plot(np.arange(first, last)/float(fs), y, 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(y), max(y)])
plt.title('yst')
plt.tight_layout()
plt.savefig('stochasticSynthesisFrame.png')
plt.show()
| agpl-3.0 |
pydata/pandas-gbq | pandas_gbq/_version.py | 1 | 18586 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "pandas_gbq/_version.py"
cfg.versionfile_source = "pandas_gbq/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(
commands, args, cwd=None, verbose=False, hide_stderr=False, env=None
):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(
GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True
)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
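    # illustration (hypothetical values): a describe output of "1.2.0-11-g2414721-dirty"
    # parses into tag "1.2.0", distance 11 and short hash "2414721", with the
    # trailing "-dirty" flag stripped just below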
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = (
"unable to parse git-describe output: '%s'" % describe_out
)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(
GITS, ["rev-list", "HEAD", "--count"], cwd=root
)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(
get_keywords(), cfg.tag_prefix, verbose
)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
| bsd-3-clause |
gfrd/gfrd | samples/pushpull/plot_tc.py | 1 | 1340 | #!/usr/bin/env python
import sys
import numpy
import scipy.io
import fractionS
from matplotlib.pylab import *
def load_header( filename ):
file = open( filename )
header = []
for line in file.readlines():
if line[0:2] == '#@':
hline = line[2:].lstrip()
header.append( hline )
return header
def add_columns( data, ycolumns ):
y = numpy.array([ data[:,col] for col in ycolumns ])
y = y.sum(0)
return y
def plot_theory( E1, E2, K, maxt ):
frac = fractionS.fraction_Sp( E1, E2, K )
x = [0.0, maxt]
y = [frac,frac]
plot( x, y )
def plot_file( filename ):
ycolumns = [2,]
#ycolumns = [2,6]
#ycolumns = [3,5]
#ycolumns = [2,6,3,5]
header = load_header( filename )
print header
for l in header:
exec( l )
data = load( filename )
x = data[:,0]
y = add_columns( data, ycolumns )
plot_theory( N_K, N_P, Keq, x[-1] )
plot( x, y / S_tot )
import glob
import os
pattern = sys.argv[1]
globpattern = pattern.replace('ALL','*')
figtitle = os.path.basename( os.path.splitext( pattern )[0] )
print figtitle
#print globpattern
filelist = glob.glob( globpattern )
#print filelist
for filename in filelist:
plot_file( filename )
title( figtitle )
savefig( 'figs/' + figtitle + '.png', dpi=80 )
#show()
| gpl-2.0 |
European-XFEL/h5tools-py | setup.py | 1 | 2890 | #!/usr/bin/env python
import os.path as osp
import re
from setuptools import setup, find_packages
import sys
def get_script_path():
return osp.dirname(osp.realpath(sys.argv[0]))
def read(*parts):
return open(osp.join(get_script_path(), *parts)).read()
def find_version(*parts):
vers_file = read(*parts)
match = re.search(r'^__version__ = "(\d+\.\d+\.\d+)"', vers_file, re.M)
if match is not None:
return match.group(1)
raise RuntimeError("Unable to find version string.")
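# find_version() only matches a line of the exact form __version__ = "X.Y.Z";
# e.g. find_version("karabo_data", "__init__.py") would return "0.7.0" if that
# file contained __version__ = "0.7.0" (version value hypothetical, for
# illustration only)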
setup(name="karabo_data",
version=find_version("karabo_data", "__init__.py"),
author="European XFEL GmbH",
author_email="[email protected]",
maintainer="Thomas Michelat",
url="https://github.com/European-XFEL/karabo_data",
description="Tools to read and analyse data from European XFEL ",
long_description=read("README.md"),
long_description_content_type='text/markdown',
license="BSD-3-Clause",
packages=find_packages(),
package_data={
'karabo_data.tests': ['dssc_geo_june19.h5', 'lpd_mar_18.h5'],
},
entry_points={
"console_scripts": [
"lsxfel = karabo_data.lsxfel:main",
"karabo-bridge-serve-files = karabo_data.export:main",
"karabo-data-validate = karabo_data.validation:main",
"karabo-data-make-virtual-cxi = karabo_data.cli.make_virtual_cxi:main"
],
},
install_requires=[
'cfelpyutils>=0.92',
'fabio',
'h5py>=2.7.1',
'karabo-bridge',
'matplotlib',
'msgpack>=0.5.4',
'msgpack-numpy>=0.4.3',
'numpy',
'pandas',
'pyzmq>=17.0.0',
'scipy',
'xarray',
],
extras_require={
'docs': [
'sphinx',
'nbsphinx',
'ipython', # For nbsphinx syntax highlighting
'sphinxcontrib_github_alt',
],
'test': [
'dask[array]',
'pytest',
'pytest-cov',
'coverage<5', # Until nbval is compatible with coverage 5.0
'nbval',
'testpath',
]
},
python_requires='>=3.5',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Physics',
]
)
| bsd-3-clause |
ch3ll0v3k/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
ShipJ/Code | Projects/SpringAutumnFair/src/analysis/clean.py | 1 | 5406 | import pandas as pd
import numpy as np
import sys
def clean(df):
"""
Takes a dataframe of salesforce data and maps values to a more usable format, returning a clean data set.
:param df: pandas DataFrame containing raw data
:return: df: cleaned data set - i.e. mapped to correct values
"""
# Replace empty and NaN's with None
empty_nan_map = {np.nan: None, '': None}
df.replace(empty_nan_map, inplace=True)
# Drop unwanted headers
df = pd.DataFrame(df.drop(['RegisteredCompany', 'OpportunityId', 'CreditStatus', 'CompanyTelephone', 'ShowSector',
'BillingPostalCode', 'BillingState', 'VATNumber', 'VATNumberValidationStatus', 'Website',
'CurrencyIsoCode', 'IsWon', 'InvoiceFrequency', 'LeadChannel', 'LeadSource',
'ProductDescription', 'ReasonLost', 'OtherReasonsLost', 'OtherCustomerObjectives',
'Probability', 'GrossArea', 'NetChargeableArea', 'ProductCategory'], axis=1))
# Exhibitions: map 'Spring Fair International 2017' -> Spring17
fairs = []
years = []
for i in range(len(df)):
fairs.append(df['Exhibition'][i].split(' ', 1)[0])
years.append(df['Exhibition'][i].split(' ')[3][2:])
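        # e.g. 'Spring Fair International 2017' -> fair 'Spring', year '2017'[2:] == '17'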
df['Exhibition'] = fairs
df['Year'] = years
# Company Sectors: strip redundant values, repeat entries, mistake entries, combine 3 cols to 1 col
words, results = [], []
stopwords = ['and', '&', 'the']
for i in range(len(df)):
query1, query2, query3 = df['CompanySector'][i], df['CompanySector2'][i], df['CompanySector3'][i]
        queries = list()
        if query1 is not None:
            queries += list(query1.split())
        if query2 is not None:
            queries += list(query2.split())
        if query3 is not None:
            queries += list(query3.split())
        # a record with no sector information at all maps to None; otherwise
        # strip stopwords and normalise the remaining sector labels
        if not queries:
            result = None
        else:
            result = [word for word in queries if word.lower() not in stopwords]
            mapping = [("Children\xe2\x80\x99s", 'Children\'s Gifts'), ('Gifts,', ''), ('Children?s', 'Children\'s Gifts'),
                       ('Fashion,', 'Fashion'), ('Jewellery,', 'Jewellery'), ('Volume,', 'Volume'), ('Kitchen,', 'Kitchen'),
                       ('Multiple, /', ''), ('Department', 'Department Store'), ('Stores', ''), ('retailer', 'Retail'),
                       ('/', ''), ('Multiple', ''), (' ', '')]
            for k, v in mapping:
                result = [i.replace(k, v) for i in result]
            if '' in result:
                result.remove('')
            result = pd.unique(result)
        results.append(result)
df = pd.DataFrame(df.drop(['CompanySector', 'CompanySector2', 'CompanySector3'], axis=1))
df['CompanySectors'] = results
# Replace unknown with None
exhibitorTypeMap = {'Unknown': None}
df['ExhibitorType'].replace(exhibitorTypeMap, inplace=True)
# Replace the multitude of Hall labels with the following map
hallMap = {'': None, '1': 1, '1.1': 1, '10,11,12': [10, 11, 12], '10-Dec': None, '11': 11, '19-20': [19, 20],
'2': 2, '20': 20, '3': 3, '4': 4, '5': 5, '6': 6, '9': 9, 'Autumn Fair Intl 2014 Hall 3': 3,
'Autumn Fair Intl 2015 Hall 1': 1, 'Autumn Fair Intl 2015 Hall 4': 4,
'Autumn Fair Intl 2015 Hall 5': 5, 'Ground Level': 'n/a', 'Hall 01': 1, 'Hall 02': 2,
'Hall 03': 3, 'Hall 04': 4, 'Hall 05': 5, 'Hall 1': 1, 'Hall 1 (H1)': 1,
'Hall 10,Hall 11,Hall 12': [10, 11, 12], 'Hall 10,Hall 11,Hall 12 (H10-12)': [10, 11, 12],
'Hall 10-12': [10, 11, 12], 'Hall 17,Hall 18 (H17-18)': [17, 18], 'Hall 17-19': [17, 18, 19],
'Hall 19,Hall 20': [19, 20], 'Hall 19,Hall 20 (H19-20)': [19, 20], 'Hall 19-20': [19, 20], 'Hall 2': 2,
'Hall 2 (H2)': 2, 'Hall 3': 3, 'Hall 3 (H3)': 3, 'Hall 3 3A': 3, 'Hall 3-3A': 3, 'Hall 4': 4,
'Hall 4 (H4)': 4, 'Hall 5': 5, 'Hall 5 (H5)': 5, 'Hall 6 & 7': [6, 7], 'Hall 6, Hall 7 (H6-7)': [6, 7],
'Hall 6,Hall7': [6, 7], 'Hall 6-7': [6, 7], 'Hall 8': 8, 'Hall 8 (H8)': 8, 'Hall 9': 9,
'Hall 9 (H9)': 9, 'Hall 9-10': [9, 10], 'Hall N1-19': range(1, 20), 'Halls 10-12': [10, 11, 12],
'Halls 6 & 7': [6, 7], 'Spring Fair International 2016 - Hall 1': 1,
'Spring Fair International 2016 - Hall 19&20': [19, 20], 'Spring Fair International 2016 - Hall 3': 3,
'Spring Fair International 2016 - Hall 4': 4, 'Spring Fair International 2016 - Hall 5': 5,
'Spring Fair International 2016 - Halls 19&20': [19, 20],
'Spring Fair International 2016 - Halls 6&7': [6, 7], 'Spring Fair International 2016 Halls 6 & 7': [6, 7]}
df['Hall'].replace(hallMap, inplace=True)
cityMap = {'.': '', 'X': '', 'Tbc': '', 'Oxon': 'Oxford', 'Girona 17469': 'Girona', 'Ny': 'New York'}
df['BillingCity'].replace(cityMap, inplace=True)
# Some stage names map to the same value
StageNameMap = {'': None, 'Adv. Commercial Negotiation': 'Commercial Negotiation', 'Close Lost': 'Closed Lost'}
df['StageName'].replace(StageNameMap, inplace=True)
# Some dates incorrectly labelled, must be greater than 0
df = df[df['CreateCloseDateDiff'] >= 0]
return df
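# Illustrative sketch (not part of the original script): demonstrates the
# Exhibition split performed inside clean() above. The label below is a
# made-up example; 'Spring Fair International 2017' yields fair 'Spring'
# and the two-digit year '17'.
def _exhibition_split_example():
    label = 'Spring Fair International 2017'
    fair = label.split(' ', 1)[0]   # 'Spring'
    year = label.split(' ')[3][2:]  # '17'
    return fair, year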
| mit |
ChristianKniep/QNIB | serverfiles/usr/local/lib/networkx-1.6/examples/graph/atlas.py | 20 | 2637 | #!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
#from networkx import *
#from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
        Attempt to check for isomorphisms and remove duplicates.
"""
Atlas=nx.graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U=nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree=[n for n in G if G.degree(n)==0]
for n in zerodegree:
G.remove_node(n)
U=nx.disjoint_union(U,G)
# list of graphs of all connected components
C=nx.connected_component_subgraphs(U)
UU=nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist=[] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G,nlist):
nlist.append(G)
UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1,G2):
return True
return False
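# Illustrative sketch (not part of the original example): iso() reports
# whether a graph looks isomorphic to any graph already collected, which is
# how atlas6() above discards duplicates.
def _iso_example():
    collected = [nx.path_graph(3)]
    candidate = nx.star_graph(2)  # same shape as a 3-node path
    return iso(candidate, collected)  # True, so candidate would be skipped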
if __name__ == '__main__':
import networkx as nx
G=atlas6()
print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
print(nx.number_connected_components(G),"connected components")
try:
from networkx import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
import matplotlib.pyplot as plt
plt.figure(1,figsize=(8,8))
# layout graphs with positions using graphviz neato
pos=nx.graphviz_layout(G,prog="neato")
# color nodes the same in each connected subgraph
C=nx.connected_component_subgraphs(G)
for g in C:
c=[random.random()]*nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.savefig("atlas.png",dpi=75)
| gpl-2.0 |
bthirion/scikit-learn | sklearn/gaussian_process/tests/test_kernels.py | 51 | 12799 | """Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2,
RBF(length_scale=[2.0]), Matern(length_scale=[2.0])]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
# Compare analytic and numeric gradient of kernels.
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
# Check that parameter vector theta of kernel is set correctly.
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s[0:-len("_bounds")],
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i + 1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i + 1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
# Auto-correlation and cross-correlation should be consistent.
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
# Test that diag method of kernel returns consistent results.
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
# Adding kernels and multiplying kernels should be commutative.
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
# Anisotropic kernel should be consistent with isotropic kernels.
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
# Test stationarity of kernels.
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def check_hyperparameters_equal(kernel1, kernel2):
# Check that hyperparameters of two kernels are equal
for attr in set(dir(kernel1) + dir(kernel2)):
if attr.startswith("hyperparameter_"):
attr_value1 = getattr(kernel1, attr)
attr_value2 = getattr(kernel2, attr)
assert_equal(attr_value1, attr_value2)
def test_kernel_clone():
# Test that sklearn's clone works correctly on kernels.
for kernel in kernels:
kernel_cloned = clone(kernel)
# XXX: Should this be fixed?
# This differs from the sklearn's estimators equality check.
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
# Check that all constructor parameters are equal.
assert_equal(kernel.get_params(), kernel_cloned.get_params())
# Check that all hyperparameters are equal.
yield check_hyperparameters_equal, kernel, kernel_cloned
def test_kernel_clone_after_set_params():
# This test is to verify that using set_params does not
# break clone on kernels.
# This used to break because in kernels such as the RBF, non-trivial
# logic that modified the length scale used to be in the constructor
# See https://github.com/scikit-learn/scikit-learn/issues/6961
# for more details.
bounds = (1e-5, 1e5)
for kernel in kernels:
kernel_cloned = clone(kernel)
params = kernel.get_params()
# RationalQuadratic kernel is isotropic.
isotropic_kernels = (ExpSineSquared, RationalQuadratic)
if 'length_scale' in params and not isinstance(kernel,
isotropic_kernels):
length_scale = params['length_scale']
if np.iterable(length_scale):
params['length_scale'] = length_scale[0]
params['length_scale_bounds'] = bounds
else:
params['length_scale'] = [length_scale] * 2
params['length_scale_bounds'] = bounds * 2
kernel_cloned.set_params(**params)
kernel_cloned_clone = clone(kernel_cloned)
assert_equal(kernel_cloned_clone.get_params(),
kernel_cloned.get_params())
assert_not_equal(id(kernel_cloned_clone), id(kernel_cloned))
yield (check_hyperparameters_equal, kernel_cloned,
kernel_cloned_clone)
def test_matern_kernel():
# Test consistency of Matern kernel for special values of nu.
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
# Check that GP kernels can also be used as pairwise kernels.
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
# Check that set_params()/get_params() is consistent with kernel.theta.
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if isinstance("string", type(hyperparameter.bounds)):
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if isinstance("string", type(hyperparameter.bounds)):
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value] * size})
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
[value] * size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
def test_repr_kernels():
# Smoke-test for repr in kernels.
for kernel in kernels:
repr(kernel)
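# Illustrative sketch (not part of the original test module): kernel
# hyperparameters are exposed on a log scale through `theta`, which is the
# property the theta-related tests above rely on.
def _theta_log_scale_example():
    kernel = RBF(length_scale=2.0)
    assert np.allclose(kernel.theta, np.log([2.0]))
    return kernel.theta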
| bsd-3-clause |
worldofchris/jlf | jlf_stats/test/test_publisher.py | 1 | 21591 | """
What output do we expect from JLF?
Tables and Graphs in Excel for:
-Cumulative Throughput
-Ratio of Work Types (i.e. Value/Failure/Overhead)
-Introduction of Defects
-Cycle Time
"""
import unittest
import tempfile
import os
from subprocess import call
import mock
from jlf_stats.metrics import Metrics
from jlf_stats import publisher
from datetime import date
import pandas as pd
import xlrd
import zipfile
import filecmp
def serve_dummy_results(*args, **kwargs):
return pd.DataFrame([1, 2, 3])
def serve_dummy_throughput(*args, **kwargs):
try:
if kwargs['types'] == ['failure', 'value', 'operational overhead']:
return pd.DataFrame([4, 5, 6])
else:
dummy = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
dummy.columns = ['one', 'two', 'three']
return dummy
except KeyError:
return pd.DataFrame({'one': [1, 2, 3]})
def serve_dummy_detail(*args, **kwargs):
if 'fields' in kwargs:
fields = kwargs['fields']
values = [1] * len(fields)
dummy = pd.DataFrame([values], columns=fields)
else:
dummy = pd.DataFrame([['TICKET-1', 'Dummy Ticket', 3]])
dummy.columns = ['id', 'name', 'cycle time']
return dummy
def serve_dummy_cfd_data(*args, **kwargs):
dummy = pd.DataFrame([['', 'in progress', 'closed'],
['open', 'in progress', 'closed'],
['open', 'closed', 'closed']])
return dummy
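# Illustrative sketch of the report configuration dictionary the publisher
# consumes (all values here are made up; the real configurations exercised
# against the mocked Metrics appear in the test methods below).
_EXAMPLE_REPORT_CONFIG = {
    'name': 'example',
    'reports': [{'metric': 'throughput',
                 'categories': 'foreach',
                 'types': 'foreach'}],
    'format': 'xlsx',
    'location': '/tmp',
    'counts_towards_throughput': [],
    'types': {'failure': ['Bug'],
              'value': ['Story']}}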
class TestGetOutput(unittest.TestCase):
def setUp(self):
self.mock_metrics = mock.Mock(spec=Metrics)
self.mock_metrics.throughput.side_effect = serve_dummy_throughput
self.mock_metrics.demand.side_effect = serve_dummy_results
self.mock_metrics.cycle_time_histogram.side_effect = serve_dummy_results
self.mock_metrics.arrival_rate.side_effect = serve_dummy_results
self.mock_metrics.details.side_effect = serve_dummy_detail
self.mock_metrics.cfd.side_effect = serve_dummy_cfd_data
self.workspace = tempfile.mkdtemp()
def testSmokeOutCommandLine(self):
"""
Smoke test to ensure we have not broken running from the command line
"""
expected_filename = 'reports.xlsx'
pwd = os.path.dirname(os.path.abspath(__file__))
bin_dir = '../../bin'
jlf = os.path.join(pwd, bin_dir, 'jlf')
config_file = os.path.join(pwd, bin_dir, 'config.json')
saved_path = os.getcwd()
os.chdir(self.workspace)
call([jlf, '-c', config_file])
os.chdir(saved_path)
actual_output = os.path.join(self.workspace, expected_filename)
self.assertTrue(os.path.isfile(actual_output))
def testOutputThroughputToExcel(self):
# Given this report config:
report_config = {'name': 'reports',
'reports': [{'metric': 'throughput',
'categories': 'foreach',
'types': 'foreach'}],
'format': 'xlsx',
'location': self.workspace,
'counts_towards_throughput': [],
'types': {'failure': ['Bug', 'Fault'],
'value': ['New Feature', 'Story', 'Improvement'],
'oo': ['Task', 'Decision', 'User Support', 'Spike']}}
# Specify categories and types to report on or:
# foreach - to report on each category separately
# combine - to aggregate totals together
# when we publish the metrics for the data in our jira
publisher.publish(report_config,
self.mock_metrics,
from_date=date(2012, 10, 8),
to_date=date(2012, 11, 12))
# Then we should get an Excel workbook
expected_filename = 'reports.xlsx'
actual_output = os.path.join(self.workspace, expected_filename)
self.assertTrue(os.path.isfile(actual_output), "Spreadsheet not published:{spreadsheet}".format(spreadsheet=actual_output))
# with a sheet containing the throughput data
workbook = xlrd.open_workbook(actual_output)
self.assertEqual('throughput', workbook.sheet_names()[0])
def testOutputCumulativeThroughputToExcel(self):
report_config = {'name': 'reports',
'reports': [{'metric': 'cumulative-throughput',
'categories': 'foreach',
'types': 'foreach'}],
'format': 'xlsx',
'counts_towards_throughput': [],
'location': self.workspace,
'types': {'failure': ['Bug', 'Fault'],
'value': ['New Feature', 'Story', 'Improvement'],
'oo': ['Task', 'Decision', 'User Support', 'Spike']}}
publisher.publish(report_config,
self.mock_metrics,
from_date=date(2012, 10, 8),
to_date=date(2012, 11, 12))
expected_filename = 'reports.xlsx'
actual_output = os.path.join(self.workspace, expected_filename)
self.assertTrue(os.path.isfile(actual_output), "Spreadsheet not published:{spreadsheet}".format(spreadsheet=actual_output))
# with a sheet containing the throughput data
workbook = xlrd.open_workbook(actual_output)
self.assertEqual('cumulative-throughput', workbook.sheet_names()[0])
def testOutputFailureDemandToExcel(self):
report_config = {'name': 'reports',
'reports': [{'metric': 'demand',
'categories': 'foreach',
'types': ['failure']}],
'format': 'xlsx',
'location': self.workspace,
'counts_towards_throughput': [],
'types': {'failure': ['Bug', 'Fault'],
'value': ['New Feature', 'Story', 'Improvement'],
'oo': ['Task', 'Decision', 'User Support', 'Spike']}}
publisher.publish(report_config,
self.mock_metrics,
from_date=date(2012, 10, 8),
to_date=date(2012, 11, 12))
expected_filename = 'reports.xlsx'
actual_output = os.path.join(self.workspace, expected_filename)
self.assertTrue(os.path.isfile(actual_output), "Spreadsheet not published:{spreadsheet}".format(spreadsheet=actual_output))
# with a sheet containing the throughput data
workbook = xlrd.open_workbook(actual_output)
self.assertEqual('failure-demand', workbook.sheet_names()[0])
def testOutputDetailToExcel(self):
report_config = {'name': 'reports',
'reports': [{'metric': 'detail',
'fields': ['this', 'that', 'the other'],
'categories': 'foreach',
'types': 'foreach',
'sort': 'week-done'}],
'format': 'xlsx',
'counts_towards_throughput': [],
'location': self.workspace,
'types': {'failure': ['Bug', 'Fault'],
'value': ['New Feature', 'Story', 'Improvement'],
'oo': ['Task', 'Decision', 'User Support', 'Spike']}}
publisher.publish(report_config,
self.mock_metrics,
from_date=date(2012, 10, 8),
to_date=date(2012, 11, 12))
expected_filename = 'reports.xlsx'
actual_output = os.path.join(self.workspace, expected_filename)
self.assertTrue(os.path.isfile(actual_output), "Spreadsheet not published:{spreadsheet}".format(spreadsheet=actual_output))
# with a sheet containing the throughput data
workbook = xlrd.open_workbook(actual_output)
self.assertEqual('detail', workbook.sheet_names()[0])
sheet = workbook.sheet_by_name('detail')
fields = report_config['reports'][0]['fields']
for i in range(len(fields)):
self.assertEqual(sheet.cell_value(0, i+1), fields[i])
# and test sorted by week-done...
# and test when no fields specified
def testOutputCycleTimeToExcel(self):
report_config = {'name': 'reports',
'reports': [{'metric': 'cycle-time',
'categories': 'foreach',
'types': ['value'],
'cycles': ['develop']}],
'format': 'xlsx',
'location': self.workspace}
publisher.publish(report_config,
self.mock_metrics,
from_date=date(2012, 10, 8),
to_date=date(2012, 11, 12))
expected_filename = 'reports.xlsx'
actual_output = os.path.join(self.workspace, expected_filename)
self.assertTrue(os.path.isfile(actual_output), "Spreadsheet not published:{spreadsheet}".format(spreadsheet=actual_output))
# with a sheet containing the throughput data
workbook = xlrd.open_workbook(actual_output)
self.assertEqual('value-develop-cycle-time', workbook.sheet_names()[0])
def testOutputCFDToExcel(self):
report_config = {'name': 'reports',
'states': [],
'reports': [{'metric': 'cfd'}],
'format': 'xlsx',
'counts_towards_throughput': [],
'location': self.workspace}
publisher.publish(report_config,
self.mock_metrics,
from_date=date(2012, 10, 8),
to_date=date(2012, 11, 12))
expected_filename = 'reports.xlsx'
actual_output = os.path.join(self.workspace, expected_filename)
self.assertTrue(os.path.isfile(actual_output), "Spreadsheet not published:{spreadsheet}".format(spreadsheet=actual_output))
# with a sheet containing the throughput data
workbook = xlrd.open_workbook(actual_output)
self.assertEqual('cfd', workbook.sheet_names()[0])
def testMakeValidSheetTitle(self):
titles = [('failure-value-operational overhead-demand', 'failure-value-operatio-demand'),
('aa-bb-cc-dd-ee-ff-gg-hh-ii-jj-kk-ll-mm-nn-oo-pp-qq-rr-ss-tt-uu-vv-ww-demand', 'a-b-c-d-e-f-g-h-i-j-k-l-demand')]
for title in titles:
actual_title = publisher.worksheet_title(title[0])
expected_title = title[1]
self.assertEqual(actual_title, expected_title)
def testOutputMultipleTypesOfThroughput(self):
report_config = {'name': 'reports',
'reports': [{'metric': 'throughput',
'categories': 'foreach',
'types': 'foreach'}],
'format': 'xlsx',
'location': self.workspace,
'counts_towards_throughput': [],
'categories': {'one': 'project = "one"',
'two': 'project = "two"',
'three': 'project = "three"'},
'types': {'failure': ['Bug', 'Fault'],
'value': ['New Feature', 'Story', 'Improvement'],
'oo': ['Task', 'Decision', 'User Support', 'Spike']}}
publisher.publish(report_config,
self.mock_metrics,
from_date=date(2012, 10, 8),
to_date=date(2012, 11, 12))
expected_filename = 'reports.xlsx'
actual_output = os.path.join(self.workspace, expected_filename)
self.assertTrue(os.path.isfile(actual_output), "Spreadsheet not published:{spreadsheet}".format(spreadsheet=actual_output))
workbook = xlrd.open_workbook(actual_output)
expected_sheet_name = 'throughput'
self.assertEqual(expected_sheet_name, workbook.sheet_names()[0])
worksheet = workbook.sheet_by_name(expected_sheet_name)
header_row = worksheet.row(0)
expected_headers = ['one', 'two', 'three']
for cell in header_row[1:]:
self.assertEqual(cell.value, expected_headers[header_row[1:].index(cell)])
@unittest.skip("Unfinished")
def testOutputArrivalRateToExcel(self):
report_config = {'name': 'reports',
'reports': [{'metric': 'arrival-rate'}],
'format': 'xlsx',
'location': self.workspace}
publisher.publish(report_config,
self.mock_metrics,
from_date=date(2012, 10, 8),
to_date=date(2012, 11, 12))
expected_filename = 'reports.xlsx'
actual_output = os.path.join(self.workspace, expected_filename)
self.assertTrue(os.path.isfile(actual_output), "Spreadsheet not published:{spreadsheet}".format(spreadsheet=actual_output))
workbook = xlrd.open_workbook(actual_output)
expected_sheet_name = 'arrival-rate'
self.assertEqual(expected_sheet_name, workbook.sheet_names()[0])
worksheet = workbook.sheet_by_name(expected_sheet_name)
header_row = worksheet.row(0)
expected_headers = ['one', 'two', 'three']
# This isn't finished
def testGetDefaultColours(self):
"""
If a cfd report doesn't specify formats for the states then use the defaults
"""
expected_formats = {'open': {'color': publisher._state_default_colours[0]},
'in progress': {'color': publisher._state_default_colours[1]},
'closed': {'color': publisher._state_default_colours[2]}}
states = ['open', 'in progress', 'closed']
actual_formats = publisher.format_states(states)
self.assertEqual(actual_formats, expected_formats)
def testMoreStatesThanDefaultColours(self):
"""
What to do if we run out of default colours
"""
expected_formats = {}
states = []
for a in range(2):
for index, colour in enumerate(publisher._state_default_colours):
state_name = 's{0}{1}'.format(a, index)
expected_formats[state_name] = {'color': colour}
states.append(state_name)
actual_formats = publisher.format_states(states)
self.assertEqual(actual_formats, expected_formats, expected_formats)
def testCfdDefaultColours(self):
report_config = {'name': 'reports_default',
'states': ['open', 'in progress', 'closed'],
'reports': [{'metric': 'cfd'}],
'format': 'xlsx',
'location': self.workspace}
publisher.publish(report_config,
self.mock_metrics,
from_date=date(2012, 10, 8),
to_date=date(2012, 11, 12))
expected_filename = 'reports_default.xlsx'
actual_output = os.path.join(self.workspace, expected_filename)
self.assertTrue(os.path.isfile(actual_output), "Spreadsheet not published:{spreadsheet}".format(spreadsheet=actual_output))
self.compareExcelFiles(actual_output, expected_filename)
def testColourInExcelCfd(self):
report_config = {'name': 'reports',
'reports': [{'metric': 'cfd',
'format': {'open': {'color': 'green'},
'in progress': {'color': 'red'},
'closed': {'color': 'yellow'}}}],
'format': 'xlsx',
'location': self.workspace}
publisher.publish(report_config,
self.mock_metrics,
from_date=date(2012, 10, 8),
to_date=date(2012, 11, 12))
expected_filename = 'reports.xlsx'
actual_output = os.path.join(self.workspace, expected_filename)
self.assertTrue(os.path.isfile(actual_output), "Spreadsheet not published:{spreadsheet}".format(spreadsheet=actual_output))
self.compareExcelFiles(actual_output, expected_filename)
@unittest.skip("TODO")
def testHistoryToExcel(self):
pass
def testCumulativeThroughputGraph(self):
report_config = {'name': 'reports',
'reports': [{'metric': 'cumulative-throughput',
'categories': 'foreach',
'types': 'foreach',
'graph': 'yes'}],
'format': 'xlsx',
'counts_towards_throughput': [],
'location': self.workspace,
'types': {'failure': ['Bug', 'Fault'],
'value': ['New Feature', 'Story', 'Improvement'],
'oo': ['Task', 'Decision', 'User Support', 'Spike']}}
publisher.publish(report_config,
self.mock_metrics,
from_date=date(2012, 10, 8),
to_date=date(2012, 11, 12))
def testSeriesName(self):
expected_name = "Features"
actual_name = publisher.series_name("bigcorp-features")
self.assertEqual(actual_name, expected_name)
def testAboutReport(self):
"""
Add a bit of blurb to the report
"""
report_config = {'name': 'reports',
'reports': [{'metric': 'cumulative-throughput',
'description': 'All about flow',
'categories': 'foreach',
'types': 'foreach'}],
'format': 'xlsx',
'counts_towards_throughput': [],
'location': self.workspace,
'types': {'failure': ['Bug', 'Fault'],
'value': ['New Feature', 'Story', 'Improvement'],
'oo': ['Task', 'Decision', 'User Support', 'Spike']}}
publisher.publish(report_config,
self.mock_metrics,
from_date=date(2012, 10, 8),
to_date=date(2012, 11, 12))
expected_filename = 'reports.xlsx'
actual_output = os.path.join(self.workspace, expected_filename)
self.assertTrue(os.path.isfile(actual_output), "Spreadsheet not published:{spreadsheet}".format(spreadsheet=actual_output))
workbook = xlrd.open_workbook(actual_output)
expected_sheet_name = 'cumulative-throughput'
self.assertEqual(expected_sheet_name, workbook.sheet_names()[0])
worksheet = workbook.sheet_by_name(expected_sheet_name)
actual = worksheet.cell_value(rowx=0, colx=5)
expected = report_config['reports'][0]['description']
self.assertEqual(actual, expected)
################################################################################################################
def compareExcelFiles(self, actual_output, expected_filename):
# Sadly reading of xlsx files with their formatting by xlrd is not supported.
        # Looking at the Office Open XML format you can see why - http://en.wikipedia.org/wiki/Office_Open_XML
# It's not exactly human readable.
#
# So, I am going to unzip the resulting xlsx file and diff the worksheet against a known good one.
cmp_files = ['xl/worksheets/sheet1.xml',
'xl/sharedStrings.xml',
'xl/styles.xml',
'xl/workbook.xml',
'xl/theme/theme1.xml']
expected_workspace = os.path.join(self.workspace, 'expected')
os.makedirs(expected_workspace)
expected_output = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', expected_filename)
with zipfile.ZipFile(expected_output, "r") as z:
z.extractall(expected_workspace)
actual_workspace = os.path.join(self.workspace, 'actual')
os.makedirs(actual_workspace)
with zipfile.ZipFile(actual_output, "r") as z:
z.extractall(actual_workspace)
for cmp_file in cmp_files:
expected_full_path = os.path.join(expected_workspace, cmp_file)
actual_full_path = os.path.join(actual_workspace, cmp_file)
self.assertTrue(filecmp.cmp(expected_full_path, actual_full_path), '{0}:{1}'.format(expected_full_path, actual_full_path))
| bsd-2-clause |
toobaz/pandas | pandas/tests/arrays/test_datetimes.py | 2 | 10859 | """
Tests for DatetimeArray
"""
import operator
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas.core.arrays import DatetimeArray
from pandas.core.arrays.datetimes import sequence_to_dt64ns
import pandas.util.testing as tm
class TestDatetimeArrayConstructor:
def test_only_1dim_accepted(self):
arr = np.array([0, 1, 2, 3], dtype="M8[h]").astype("M8[ns]")
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 2-dim
DatetimeArray(arr.reshape(2, 2))
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 0-dim
DatetimeArray(arr[[0]].squeeze())
def test_freq_validation(self):
# GH#24623 check that invalid instances cannot be created with the
# public constructor
arr = np.arange(5, dtype=np.int64) * 3600 * 10 ** 9
msg = (
"Inferred frequency H from passed values does not "
"conform to passed frequency W-SUN"
)
with pytest.raises(ValueError, match=msg):
DatetimeArray(arr, freq="W")
@pytest.mark.parametrize(
"meth",
[
DatetimeArray._from_sequence,
sequence_to_dt64ns,
pd.to_datetime,
pd.DatetimeIndex,
],
)
def test_mixing_naive_tzaware_raises(self, meth):
# GH#24569
arr = np.array([pd.Timestamp("2000"), pd.Timestamp("2000", tz="CET")])
msg = (
"Cannot mix tz-aware with tz-naive values|"
"Tz-aware datetime.datetime cannot be converted "
"to datetime64 unless utc=True"
)
for obj in [arr, arr[::-1]]:
# check that we raise regardless of whether naive is found
# before aware or vice-versa
with pytest.raises(ValueError, match=msg):
meth(obj)
def test_from_pandas_array(self):
arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10 ** 9
result = DatetimeArray._from_sequence(arr, freq="infer")
expected = pd.date_range("1970-01-01", periods=5, freq="H")._data
tm.assert_datetime_array_equal(result, expected)
def test_mismatched_timezone_raises(self):
arr = DatetimeArray(
np.array(["2000-01-01T06:00:00"], dtype="M8[ns]"),
dtype=DatetimeTZDtype(tz="US/Central"),
)
dtype = DatetimeTZDtype(tz="US/Eastern")
with pytest.raises(TypeError, match="Timezone of the array"):
DatetimeArray(arr, dtype=dtype)
def test_non_array_raises(self):
with pytest.raises(ValueError, match="list"):
DatetimeArray([1, 2, 3])
def test_other_type_raises(self):
with pytest.raises(
ValueError, match="The dtype of 'values' is incorrect.*bool"
):
DatetimeArray(np.array([1, 2, 3], dtype="bool"))
def test_incorrect_dtype_raises(self):
with pytest.raises(ValueError, match="Unexpected value for 'dtype'."):
DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="category")
def test_freq_infer_raises(self):
with pytest.raises(ValueError, match="Frequency inference"):
DatetimeArray(np.array([1, 2, 3], dtype="i8"), freq="infer")
def test_copy(self):
data = np.array([1, 2, 3], dtype="M8[ns]")
arr = DatetimeArray(data, copy=False)
assert arr._data is data
arr = DatetimeArray(data, copy=True)
assert arr._data is not data
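# Illustrative sketch (not part of the original test module): the public
# constructor expects a datetime64[ns] ndarray, which is why passing a plain
# Python list raises in test_non_array_raises above.
def _construction_example():
    values = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")
    return DatetimeArray(values)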
class TestDatetimeArrayComparisons:
# TODO: merge this into tests/arithmetic/test_datetime64 once it is
# sufficiently robust
def test_cmp_dt64_arraylike_tznaive(self, all_compare_operators):
# arbitrary tz-naive DatetimeIndex
opname = all_compare_operators.strip("_")
op = getattr(operator, opname)
dti = pd.date_range("2016-01-1", freq="MS", periods=9, tz=None)
arr = DatetimeArray(dti)
assert arr.freq == dti.freq
assert arr.tz == dti.tz
right = dti
expected = np.ones(len(arr), dtype=bool)
if opname in ["ne", "gt", "lt"]:
# for these the comparisons should be all-False
expected = ~expected
result = op(arr, arr)
tm.assert_numpy_array_equal(result, expected)
for other in [right, np.array(right)]:
# TODO: add list and tuple, and object-dtype once those
# are fixed in the constructor
result = op(arr, other)
tm.assert_numpy_array_equal(result, expected)
result = op(other, arr)
tm.assert_numpy_array_equal(result, expected)
class TestDatetimeArray:
def test_astype_to_same(self):
arr = DatetimeArray._from_sequence(["2000"], tz="US/Central")
result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False)
assert result is arr
@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
def test_astype_int(self, dtype):
arr = DatetimeArray._from_sequence([pd.Timestamp("2000"), pd.Timestamp("2001")])
result = arr.astype(dtype)
if np.dtype(dtype).kind == "u":
expected_dtype = np.dtype("uint64")
else:
expected_dtype = np.dtype("int64")
expected = arr.astype(expected_dtype)
assert result.dtype == expected_dtype
tm.assert_numpy_array_equal(result, expected)
def test_tz_setter_raises(self):
arr = DatetimeArray._from_sequence(["2000"], tz="US/Central")
with pytest.raises(AttributeError, match="tz_localize"):
arr.tz = "UTC"
def test_setitem_different_tz_raises(self):
data = np.array([1, 2, 3], dtype="M8[ns]")
arr = DatetimeArray(data, copy=False, dtype=DatetimeTZDtype(tz="US/Central"))
with pytest.raises(ValueError, match="None"):
arr[0] = pd.Timestamp("2000")
with pytest.raises(ValueError, match="US/Central"):
arr[0] = pd.Timestamp("2000", tz="US/Eastern")
def test_setitem_clears_freq(self):
a = DatetimeArray(pd.date_range("2000", periods=2, freq="D", tz="US/Central"))
a[0] = pd.Timestamp("2000", tz="US/Central")
assert a.freq is None
def test_repeat_preserves_tz(self):
dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")
arr = DatetimeArray(dti)
repeated = arr.repeat([1, 1])
# preserves tz and values, but not freq
expected = DatetimeArray(arr.asi8, freq=None, dtype=arr.dtype)
tm.assert_equal(repeated, expected)
def test_value_counts_preserves_tz(self):
dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")
arr = DatetimeArray(dti).repeat([4, 3])
result = arr.value_counts()
# Note: not tm.assert_index_equal, since `freq`s do not match
assert result.index.equals(dti)
arr[-2] = pd.NaT
result = arr.value_counts()
expected = pd.Series([1, 4, 2], index=[pd.NaT, dti[0], dti[1]])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("method", ["pad", "backfill"])
def test_fillna_preserves_tz(self, method):
dti = pd.date_range("2000-01-01", periods=5, freq="D", tz="US/Central")
arr = DatetimeArray(dti, copy=True)
arr[2] = pd.NaT
fill_val = dti[1] if method == "pad" else dti[3]
expected = DatetimeArray._from_sequence(
[dti[0], dti[1], fill_val, dti[3], dti[4]], freq=None, tz="US/Central"
)
result = arr.fillna(method=method)
tm.assert_extension_array_equal(result, expected)
# assert that arr and dti were not modified in-place
assert arr[2] is pd.NaT
assert dti[2] == pd.Timestamp("2000-01-03", tz="US/Central")
def test_array_interface_tz(self):
tz = "US/Central"
data = DatetimeArray(pd.date_range("2017", periods=2, tz=tz))
result = np.asarray(data)
expected = np.array(
[
pd.Timestamp("2017-01-01T00:00:00", tz=tz),
pd.Timestamp("2017-01-02T00:00:00", tz=tz),
],
dtype=object,
)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(data, dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(data, dtype="M8[ns]")
expected = np.array(
["2017-01-01T06:00:00", "2017-01-02T06:00:00"], dtype="M8[ns]"
)
tm.assert_numpy_array_equal(result, expected)
def test_array_interface(self):
data = DatetimeArray(pd.date_range("2017", periods=2))
expected = np.array(
["2017-01-01T00:00:00", "2017-01-02T00:00:00"], dtype="datetime64[ns]"
)
result = np.asarray(data)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(data, dtype=object)
expected = np.array(
[pd.Timestamp("2017-01-01T00:00:00"), pd.Timestamp("2017-01-02T00:00:00")],
dtype=object,
)
tm.assert_numpy_array_equal(result, expected)
class TestSequenceToDT64NS:
def test_tz_dtype_mismatch_raises(self):
arr = DatetimeArray._from_sequence(["2000"], tz="US/Central")
with pytest.raises(TypeError, match="data is already tz-aware"):
sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC"))
def test_tz_dtype_matches(self):
arr = DatetimeArray._from_sequence(["2000"], tz="US/Central")
result, _, _ = sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="US/Central"))
tm.assert_numpy_array_equal(arr._data, result)
class TestReductions:
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_min_max(self, tz):
arr = DatetimeArray._from_sequence(
[
"2000-01-03",
"2000-01-03",
"NaT",
"2000-01-02",
"2000-01-05",
"2000-01-04",
],
tz=tz,
)
result = arr.min()
expected = pd.Timestamp("2000-01-02", tz=tz)
assert result == expected
result = arr.max()
expected = pd.Timestamp("2000-01-05", tz=tz)
assert result == expected
result = arr.min(skipna=False)
assert result is pd.NaT
result = arr.max(skipna=False)
assert result is pd.NaT
@pytest.mark.parametrize("tz", [None, "US/Central"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_empty(self, skipna, tz):
arr = DatetimeArray._from_sequence([], tz=tz)
result = arr.min(skipna=skipna)
assert result is pd.NaT
result = arr.max(skipna=skipna)
assert result is pd.NaT
| bsd-3-clause |
sureshthalamati/spark | python/pyspark/sql/utils.py | 3 | 5464 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import py4j
class CapturedException(Exception):
def __init__(self, desc, stackTrace):
self.desc = desc
self.stackTrace = stackTrace
def __str__(self):
return repr(self.desc)
class AnalysisException(CapturedException):
"""
Failed to analyze a SQL query plan.
"""
class ParseException(CapturedException):
"""
Failed to parse a SQL command.
"""
class IllegalArgumentException(CapturedException):
"""
Passed an illegal or inappropriate argument.
"""
class StreamingQueryException(CapturedException):
"""
Exception that stopped a :class:`StreamingQuery`.
"""
class QueryExecutionException(CapturedException):
"""
Failed to execute a query.
"""
def capture_sql_exception(f):
def deco(*a, **kw):
try:
return f(*a, **kw)
except py4j.protocol.Py4JJavaError as e:
s = e.java_exception.toString()
stackTrace = '\n\t at '.join(map(lambda x: x.toString(),
e.java_exception.getStackTrace()))
if s.startswith('org.apache.spark.sql.AnalysisException: '):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.analysis'):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.parser.ParseException: '):
raise ParseException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.streaming.StreamingQueryException: '):
raise StreamingQueryException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.execution.QueryExecutionException: '):
raise QueryExecutionException(s.split(': ', 1)[1], stackTrace)
if s.startswith('java.lang.IllegalArgumentException: '):
raise IllegalArgumentException(s.split(': ', 1)[1], stackTrace)
raise
return deco
def install_exception_handler():
"""
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
When calling Java API, it will call `get_return_value` to parse the returned object.
    If any exception happens in the JVM, the result will be a Java exception object, which raises
py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
could capture the Java exception and throw a Python one (with the same error message).
It's idempotent, could be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
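# Illustrative sketch (assumes a live SparkSession named `spark`; not part
# of the original module). Once install_exception_handler() has run, an
# invalid query surfaces as the Python-level AnalysisException defined above
# instead of a raw Py4JJavaError.
def _capture_example(spark):
    install_exception_handler()
    try:
        spark.sql("SELECT * FROM table_that_does_not_exist")
    except AnalysisException as exc:
        return exc.desc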
def toJArray(gateway, jtype, arr):
"""
Convert python list to java type array
:param gateway: Py4j Gateway
:param jtype: java type of element in array
:param arr: python type list
"""
jarr = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarr[i] = arr[i]
return jarr
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.19.2"
from distutils.version import LooseVersion
try:
import pandas
except ImportError:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.8.0"
from distutils.version import LooseVersion
try:
import pyarrow
except ImportError:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
| apache-2.0 |
QJonny/CyNest | cynest/nest/raster_plot.py | 2 | 6131 | import cynest as nest
import numpy
import pylab
def extract_events(data, time=None, sel=None):
"""
    Extracts all events that fall within a given time interval or that
    come from a given set of neurons.
- data is a matrix such that
data[:,0] is a vector of all gids and
data[:,1] a vector with the corresponding time stamps.
- time is a list with at most two entries such that
time=[t_max] extracts all events with t< t_max
time=[t_min, t_max] extracts all events with t_min <= t < t_max
- sel is a list of gids such that
sel=[gid1, ... , gidn] extracts all events from these gids.
All others are discarded.
Both time and sel may be used at the same time such that all
events are extracted for which both conditions are true.
"""
val = []
if time:
t_max = time[-1]
if len(time) > 1:
t_min = time[0]
else:
t_min = 0
for v in data:
t = v[1]
gid = v[0]
if time and (t < t_min or t >= t_max):
continue
if not sel or gid in sel:
val.append(v)
return numpy.array(val)
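# Illustrative usage sketch (not part of the original module): the event
# matrix rows are [gid, time], and extract_events() filters them by time
# window and/or sender gid. The values below are made up.
def _extract_events_example():
    raw = numpy.array([[1, 10.0], [2, 12.5], [1, 20.0], [3, 35.0]])
    in_window = extract_events(raw, time=[10.0, 30.0])           # drops t=35.0
    from_gid1 = extract_events(raw, time=[10.0, 30.0], sel=[1])  # only gid 1
    return in_window, from_gid1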
def from_data(data, title=None, hist=False, hist_binwidth=5.0, grayscale=False, sel=None):
"""
Plot raster from data array
"""
ts = data[:, 1]
d = extract_events(data, sel=sel)
ts1 = d[:, 1]
gids = d[:, 0]
return _make_plot(ts, ts1, gids, data[:, 0], hist, hist_binwidth, grayscale, title)
def from_file(fname, title=None, hist=False, hist_binwidth=5.0, grayscale=False):
"""
Plot raster from file
"""
if nest.is_sequencetype(fname):
data = None
for f in fname:
if data is None:
data = numpy.loadtxt(f)
else:
data = numpy.concatenate((data, numpy.loadtxt(f)))
else:
data = numpy.loadtxt(fname)
return from_data(data, title, hist, hist_binwidth, grayscale)
def from_device(detec, title=None, hist=False, hist_binwidth=5.0, grayscale=False, plot_lid=False):
"""
Plot raster from spike detector
"""
if not nest.GetStatus(detec)[0]["model"] == "spike_detector":
raise nest.NESTError("Please provide a spike_detector.")
if nest.GetStatus(detec, "to_memory")[0]:
ts, gids = _from_memory(detec)
if not len(ts):
raise nest.NESTError("No events recorded!")
if plot_lid:
gids = [nest.GetLID([x]) for x in gids]
if title is None:
title = "Raster plot from device '%i'" % detec[0]
if nest.GetStatus(detec)[0]["time_in_steps"]:
xlabel = "Steps"
else:
xlabel = "Time (ms)"
return _make_plot(ts, ts, gids, gids, hist, hist_binwidth, grayscale, title, xlabel)
elif nest.GetStatus(detec, "to_file")[0]:
fname = nest.GetStatus(detec, "filenames")[0]
return from_file(fname, title, hist, hist_binwidth, grayscale)
else:
raise nest.NESTError("No data to plot. Make sure that either to_memory or to_file are set.")
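# Illustrative usage sketch (assumes `detec` is the gid list of a
# spike_detector that has recorded events to memory; not part of the
# original module).
def _from_device_example(detec):
    plot_id = from_device(detec, title="Example raster",
                          hist=True, hist_binwidth=10.0)
    show()  # blocks until the figure window is closed
    return plot_id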
def _from_memory(detec):
ev = nest.GetStatus(detec, "events")[0]
return ev["times"], ev["senders"]
def _make_plot(ts, ts1, gids, neurons, hist, hist_binwidth, grayscale, title, xlabel=None):
"""
Generic plotting routine that constructs a raster plot along with
an optional histogram (common part in all routines above)
"""
pylab.figure()
if grayscale:
color_marker = ".k"
color_bar = "gray"
else:
color_marker = "."
color_bar = "blue"
color_edge = "black"
if xlabel is None:
xlabel = "Time (ms)"
ylabel = "Neuron ID"
if hist:
ax1 = pylab.axes([0.1, 0.3, 0.85, 0.6])
plotid = pylab.plot(ts1, gids, color_marker)
pylab.ylabel(ylabel)
pylab.xticks([])
xlim = pylab.xlim()
pylab.axes([0.1, 0.1, 0.85, 0.17])
t_bins = numpy.arange(numpy.amin(ts), numpy.amax(ts), float(hist_binwidth))
n, bins = _histogram(ts, bins=t_bins)
num_neurons = len(numpy.unique(neurons))
heights = 1000 * n / (hist_binwidth * num_neurons)
pylab.bar(t_bins, heights, width=hist_binwidth, color=color_bar, edgecolor=color_edge)
pylab.yticks([int(x) for x in numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4)])
pylab.ylabel("Rate (Hz)")
pylab.xlabel(xlabel)
pylab.xlim(xlim)
pylab.axes(ax1)
else:
plotid = pylab.plot(ts1, gids, color_marker)
pylab.xlabel(xlabel)
pylab.ylabel(ylabel)
if title is None:
pylab.title("Raster plot")
else:
pylab.title(title)
pylab.draw()
return plotid
def _histogram(a, bins=10, rangeV=None, normed=False):
from numpy import asarray, iterable, linspace, sort, concatenate
a = asarray(a).ravel()
if rangeV is not None:
mn, mx = rangeV
if mn > mx:
raise AttributeError( "max must be larger than min in range parameter.")
if not iterable(bins):
if rangeV is None:
rangeV = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in rangeV]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins, endpoint=False)
else:
        if (bins[1:] - bins[:-1] < 0).any():
raise AttributeError("bins must increase monotonically.")
# best block size probably depends on processor cache size
block = 65536
n = sort(a[:block]).searchsorted(bins)
for i in range(block, a.size, block):
n += sort(a[i:i + block]).searchsorted(bins)
n = concatenate([n, [len(a)]])
n = n[1:] - n[:-1]
if normed:
db = bins[1] - bins[0]
return 1.0 / (a.size * db) * n, bins
else:
return n, bins
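# Illustrative sketch (not part of the original module): _histogram() mirrors
# numpy's histogram but bins the data block-wise; it returns the counts per
# bin together with the left bin edges. The spike times below are made up.
def _histogram_example():
    spikes = numpy.array([1.0, 2.0, 2.5, 7.5, 9.0])
    counts, bin_edges = _histogram(spikes, bins=5, rangeV=(0.0, 10.0))
    return counts, bin_edges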
def show():
"""
Call pylab.show() to show all figures and enter the GUI main loop.
Python will block until all figure windows are closed again.
You should call this function only once at the end of a script.
See also: http://matplotlib.sourceforge.net/faq/howto_faq.html#use-show
"""
pylab.show()
| gpl-2.0 |
joernhees/scikit-learn | doc/conf.py | 2 | 9836 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpy_ext.numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
]
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2007 - 2017, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'reference_url': {
'sklearn': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.8.1',
'scipy': 'http://docs.scipy.org/doc/scipy-0.13.3/reference'},
'expected_failing_examples': [
'../examples/applications/plot_stock_market.py']
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# value: the maximum width used when resizing that carousel thumbnail
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_outlier_detection_003.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
# Config for sphinx_issues
issues_uri = 'https://github.com/scikit-learn/scikit-learn/issues/{issue}'
issues_github_path = 'scikit-learn/scikit-learn'
issues_user_uri = 'https://github.com/{user}'
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
phdowling/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of each point
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
# fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
# fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
bikong2/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
wanggang3333/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
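# Illustrative sketch (not part of the scikit-learn API): a tiny, assumed
# example showing what ``scale`` does column-wise on a small dense array.
# The helper name and the input values below are made up for demonstration.
def _scale_usage_sketch():
    """Standardize a small dense array and check the per-column statistics."""
    import numpy as np
    X_demo = np.array([[1., -1., 2.],
                       [2., 0., 0.],
                       [0., 1., -1.]])
    X_scaled = scale(X_demo)
    # After scaling, each column has (approximately) zero mean and unit std.
    assert np.allclose(X_scaled.mean(axis=0), 0.)
    assert np.allclose(X_scaled.std(axis=0), 1.)
    return X_scaled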
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
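# Illustrative sketch (assumed values; the helper is not part of the public
# API): applies MinMaxScaler to a tiny array and checks the documented mapping
# of per-feature minima to 0 and maxima to 1, plus the inverse transform.
def _minmax_scaler_usage_sketch():
    """Fit MinMaxScaler on a small array and verify the (0, 1) feature range."""
    import numpy as np
    X_demo = np.array([[1., 2.], [3., 6.], [5., 10.]])
    scaler = MinMaxScaler(feature_range=(0, 1))
    X_scaled = scaler.fit_transform(X_demo)
    # Matches X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)).
    assert np.allclose(X_scaled.min(axis=0), 0.)
    assert np.allclose(X_scaled.max(axis=0), 1.)
    # inverse_transform maps the scaled data back to the original values.
    assert np.allclose(scaler.inverse_transform(X_scaled), X_demo)
    return X_scaled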
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
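# Illustrative sketch (assumed data; the helper name is not module API):
# round-trips a small array through StandardScaler to show that ``transform``
# centers/scales and ``inverse_transform`` restores the original values.
def _standard_scaler_usage_sketch():
    """Fit, transform and invert StandardScaler on a small dense array."""
    import numpy as np
    X_demo = np.array([[1., -1.], [2., 0.], [0., 1.]])
    scaler = StandardScaler().fit(X_demo)
    X_scaled = scaler.transform(X_demo)
    assert np.allclose(X_scaled.mean(axis=0), 0.)
    assert np.allclose(X_scaled.std(axis=0), 1.)
    assert np.allclose(scaler.inverse_transform(X_scaled), X_demo)
    return X_scaled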
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
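# Illustrative sketch (assumed data; the helper name is not public API):
# scales a signed array with MaxAbsScaler so that each column's maximal
# absolute value becomes 1 while signs (and hence sparsity) are preserved.
def _maxabs_scaler_usage_sketch():
    """Scale each feature of a signed array by its maximum absolute value."""
    import numpy as np
    X_demo = np.array([[1., -2.], [-4., 1.], [2., 0.5]])
    X_scaled = MaxAbsScaler().fit_transform(X_demo)
    assert np.allclose(np.abs(X_scaled).max(axis=0), 1.)
    assert np.array_equal(np.sign(X_scaled), np.sign(X_demo))
    return X_scaled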
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
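# Illustrative sketch (assumed data; the helper is not module API): contrasts
# RobustScaler with StandardScaler on a column containing a single large
# outlier; the median/IQR statistics are far less affected by it.
def _robust_scaler_usage_sketch():
    """Scale a column with an outlier using median and interquartile range."""
    import numpy as np
    X_demo = np.array([[1.], [2.], [3.], [4.], [100.]])  # 100. is an outlier
    X_robust = RobustScaler().fit_transform(X_demo)
    X_standard = StandardScaler().fit_transform(X_demo)
    # The robustly scaled column is centered on its median (exactly 0 here),
    # while the mean/std used by StandardScaler are dragged by the outlier.
    assert np.isclose(np.median(X_robust), 0.)
    return X_robust, X_standard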
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
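# Illustrative sketch (assumed input; the helper is not public API): expands a
# two-feature sample into the degree-2 terms described in the class docstring.
def _polynomial_features_usage_sketch():
    """Generate [1, a, b, a**2, a*b, b**2] for a single sample [a, b]."""
    import numpy as np
    X_demo = np.array([[2., 3.]])
    XP = PolynomialFeatures(degree=2).fit_transform(X_demo)
    assert np.allclose(XP, [[1., 2., 3., 4., 6., 9.]])
    # With interaction_only=True the pure powers a**2 and b**2 are dropped.
    XP_inter = PolynomialFeatures(degree=2,
                                  interaction_only=True).fit_transform(X_demo)
    assert np.allclose(XP_inter, [[1., 2., 3., 6.]])
    return XP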
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
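# Illustrative sketch (assumed data; the helper is not public API):
# l2-normalizes the rows of a small array; non-zero rows end up with unit
# Euclidean norm and the all-zero row is left untouched, thanks to
# _handle_zeros_in_scale.
def _normalize_usage_sketch():
    """Normalize each sample (row) of a small array to unit l2 norm."""
    import numpy as np
    X_demo = np.array([[3., 4.], [1., 0.], [0., 0.]])
    X_normed = normalize(X_demo, norm='l2')
    assert np.allclose(X_normed[0], [0.6, 0.8])
    assert np.isclose(np.linalg.norm(X_normed[1]), 1.)
    assert np.allclose(X_normed[2], 0.)
    return X_normed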
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
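# Illustrative sketch (assumed data; the helper is not public API): thresholds
# a small dense array, mapping values above the threshold to 1 and the rest
# to 0.
def _binarize_usage_sketch():
    """Binarize a small array with a threshold of 0.5."""
    import numpy as np
    X_demo = np.array([[0.2, -1.0], [1.5, 0.5]])
    X_bin = binarize(X_demo, threshold=0.5)
    # 0.5 itself is not strictly greater than the threshold, so it maps to 0.
    assert np.array_equal(X_bin, np.array([[0., 0.], [1., 0.]]))
    return X_bin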
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
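# Illustrative sketch (assumed random data; the helper is not public API):
# checks the equivalence stated in the KernelCenterer docstring for a linear
# kernel, where centering K matches centering the features explicitly with
# StandardScaler(with_std=False).
def _kernel_centerer_usage_sketch():
    """Center a linear kernel and compare with explicit feature centering."""
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.randn(5, 3)
    K = np.dot(X_demo, X_demo.T)  # linear kernel, phi(x) = x
    K_centered = KernelCenterer().fit(K).transform(K)
    X_centered = StandardScaler(with_std=False).fit_transform(X_demo)
    assert np.allclose(K_centered, np.dot(X_centered, X_centered.T))
    return K_centered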
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
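# Illustrative sketch (assumed data; the helper is not public API): prepends
# an intercept column to dense and sparse representations of the same matrix
# and checks that both code paths agree.
def _add_dummy_feature_usage_sketch():
    """Add a dummy (intercept) feature to dense and CSR inputs."""
    import numpy as np
    from scipy import sparse as sp
    X_demo = np.array([[0., 1.], [1., 0.]])
    X_dense = add_dummy_feature(X_demo)
    X_sparse = add_dummy_feature(sp.csr_matrix(X_demo))
    assert np.allclose(X_dense, [[1., 0., 1.], [1., 1., 0.]])
    assert np.allclose(X_sparse.toarray(), X_dense)
    return X_dense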
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
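# Illustrative sketch (assumed data and callable; the helper is not public
# API): applies a transform to a subset of columns only, mirroring the
# OneHotEncoder docstring's note that non-categorical features are stacked to
# the right of the transformed block.
def _transform_selected_usage_sketch():
    """Double only column 0 of a small array via _transform_selected."""
    import numpy as np
    X_demo = np.array([[1., 10.], [2., 20.]])
    X_out = _transform_selected(X_demo, lambda Z: 2 * Z, selected=[0])
    assert np.allclose(X_out, [[2., 10.], [4., 20.]])
    return X_out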
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
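    # A worked example of the index arithmetic above, using the numbers from the
    # class docstring: with n_values_ = [2, 3, 4] the cumulative offsets are
    # feature_indices_ = [0, 2, 5, 9], so the sample [0, 1, 1] activates output
    # columns 0 + 0 = 0, 1 + 2 = 3 and 1 + 5 = 6, which are exactly the three ones
    # in the docstring's transform([[0, 1, 1]]) result.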
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that were seen during fit,
        # i.e. those with values less than n_values_, selected via the mask below.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
FedeMPouzols/Savu | scripts/log_evaluation/VisualiseProfileData.py | 2 | 3197 | import GraphicalThreadProfiler as GTP
import GraphicalThreadProfiler_multi as GTP_m
import fnmatch
import os
def convert(files):
import pandas as pd
# calculate the mean and std
test = []
index = ['file_system', 'nNodes_x_nCores', 'Mean_time', 'nNodes',
'Std_time']
for file in files:
temp_frame = pd.read_csv(file, header=None)
a = (file.split('/')[-1]).split('_')
vals = pd.Series([a[3],
int(a[-6].split('N')[1])*int(a[-5].split('C')[1]),
int(temp_frame.iloc[1, 1])*0.001,
int(a[-6].split('N')[1]),
int(temp_frame.iloc[2, 1])*0.001], index=index)
test.append(vals)
all_vals = (pd.concat(test, axis=1).transpose())
frame = all_vals
return frame
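# Note on the expected input (inferred from the indexing above, so treat it as an
# assumption rather than a spec): each *_stats.csv name is split on '_', field 3
# gives the file system, a field of the form 'N<nodes>' sits sixth from the end
# with the matching 'C<cores>' field right after it, and rows 1 and 2 of the CSV
# hold the mean and std times, which are scaled by 0.001 here.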
def get_files(dir_path):
from os import listdir
from os.path import isfile, join
all_files = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
files = [(dir_path + '/' + f) for f in all_files]
return files
def render_template(frame, outfilename, title, size, params, header_shift,
max_std):
from jinja2 import Template
import template_strings as ts
nVals = len(frame)
f_out = open(outfilename, 'w')
template = Template(ts.set_template_string_vis(nVals, title, size, params,
header_shift))
style = os.path.dirname(__file__) + '/style_sheet.css'
print outfilename
f_out.write(template.render(frame=[map(list, f) for f in frame],
style_sheet=style, max_bubble=max_std))
f_out.close()
return
def convert_all_files():
all_files = get_files(os.getcwd())
    single_files = [f for f in all_files if f.split('.')[-1][0] == 'o']
GTP.convert(single_files)
wildcard_files = [(os.path.dirname(f) + '/' +
os.path.basename(f).split('.')[-2] + '*')
for f in single_files]
for wildcard in set(wildcard_files):
matching_files = []
for file in single_files:
if fnmatch.fnmatch(file, wildcard):
matching_files.append(file)
GTP_m.convert(matching_files)
create_bubble_chart(get_files(os.getcwd()))
def create_bubble_chart(all_files):
stats_files = [f for f in all_files if 'stats.csv' in f]
frame = convert(stats_files)
max_std = frame.Std_time.max()
frame['link'] = [('file://' + f.split('_stats')[0] + '.html')
for f in stats_files]
size = [(70, 70), (100, 100)]
#params = {}
params = {'Chunk': 'false', 'Process': '12', 'Data size': '(91,135,160)'}
render_template([frame.values.tolist()], 'analysis.html', ['Nodes'], size,
params, 0, max_std)
if __name__ == "__main__":
import optparse
usage = "%prog [options] input_file"
parser = optparse.OptionParser(usage=usage)
(options, args) = parser.parse_args()
    if len(args) == 1:
        filename = (os.getcwd() if args[0] == '.' else args[0])
create_bubble_chart(get_files(filename))
else:
convert_all_files()
| gpl-3.0 |
neuroidss/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/testfuncs.py | 72 | 20890 | """Some test functions for bivariate interpolation.
Most of these have been yoinked from ACM TOMS 792.
http://netlib.org/toms/792
"""
import numpy as np
from triangulate import Triangulation
class TestData(dict):
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
self.__dict__ = self
class TestDataSet(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
data = TestData(
franke100=TestDataSet(
x=np.array([ 0.0227035, 0.0539888, 0.0217008, 0.0175129, 0.0019029,
-0.0509685, 0.0395408, -0.0487061, 0.0315828, -0.0418785,
0.1324189, 0.1090271, 0.1254439, 0.093454 , 0.0767578,
0.1451874, 0.0626494, 0.1452734, 0.0958668, 0.0695559,
0.2645602, 0.2391645, 0.208899 , 0.2767329, 0.1714726,
0.2266781, 0.1909212, 0.1867647, 0.2304634, 0.2426219,
0.3663168, 0.3857662, 0.3832392, 0.3179087, 0.3466321,
0.3776591, 0.3873159, 0.3812917, 0.3795364, 0.2803515,
0.4149771, 0.4277679, 0.420001 , 0.4663631, 0.4855658,
0.4092026, 0.4792578, 0.4812279, 0.3977761, 0.4027321,
0.5848691, 0.5730076, 0.6063893, 0.5013894, 0.5741311,
0.6106955, 0.5990105, 0.5380621, 0.6096967, 0.5026188,
0.6616928, 0.6427836, 0.6396475, 0.6703963, 0.7001181,
0.633359 , 0.6908947, 0.6895638, 0.6718889, 0.6837675,
0.7736939, 0.7635332, 0.7410424, 0.8258981, 0.7306034,
0.8086609, 0.8214531, 0.729064 , 0.8076643, 0.8170951,
0.8424572, 0.8684053, 0.8366923, 0.9418461, 0.8478122,
0.8599583, 0.91757 , 0.8596328, 0.9279871, 0.8512805,
1.044982 , 0.9670631, 0.9857884, 0.9676313, 1.0129299,
0.965704 , 1.0019855, 1.0359297, 1.0414677, 0.9471506]),
y=np.array([-0.0310206, 0.1586742, 0.2576924, 0.3414014, 0.4943596,
0.5782854, 0.6993418, 0.7470194, 0.9107649, 0.996289 ,
0.050133 , 0.0918555, 0.2592973, 0.3381592, 0.4171125,
0.5615563, 0.6552235, 0.7524066, 0.9146523, 0.9632421,
0.0292939, 0.0602303, 0.2668783, 0.3696044, 0.4801738,
0.5940595, 0.6878797, 0.8185576, 0.9046507, 0.9805412,
0.0396955, 0.0684484, 0.2389548, 0.3124129, 0.4902989,
0.5199303, 0.6445227, 0.8203789, 0.8938079, 0.9711719,
-0.0284618, 0.1560965, 0.2262471, 0.3175094, 0.3891417,
0.5084949, 0.6324247, 0.7511007, 0.8489712, 0.9978728,
-0.0271948, 0.127243 , 0.2709269, 0.3477728, 0.4259422,
0.6084711, 0.6733781, 0.7235242, 0.9242411, 1.0308762,
0.0255959, 0.0707835, 0.2008336, 0.3259843, 0.4890704,
0.5096324, 0.669788 , 0.7759569, 0.9366096, 1.0064516,
0.0285374, 0.1021403, 0.1936581, 0.3235775, 0.4714228,
0.6091595, 0.6685053, 0.8022808, 0.847679 , 1.0512371,
0.0380499, 0.0902048, 0.2083092, 0.3318491, 0.4335632,
0.5910139, 0.6307383, 0.8144841, 0.904231 , 0.969603 ,
-0.01209 , 0.1334114, 0.2695844, 0.3795281, 0.4396054,
0.5044425, 0.6941519, 0.7459923, 0.8682081, 0.9801409])),
franke33=TestDataSet(
x=np.array([ 5.00000000e-02, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 1.00000000e-01, 1.00000000e-01,
1.50000000e-01, 2.00000000e-01, 2.50000000e-01,
3.00000000e-01, 3.50000000e-01, 5.00000000e-01,
5.00000000e-01, 5.50000000e-01, 6.00000000e-01,
6.00000000e-01, 6.00000000e-01, 6.50000000e-01,
7.00000000e-01, 7.00000000e-01, 7.00000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
8.00000000e-01, 8.00000000e-01, 8.50000000e-01,
9.00000000e-01, 9.00000000e-01, 9.50000000e-01,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00]),
y=np.array([ 4.50000000e-01, 5.00000000e-01, 1.00000000e+00,
0.00000000e+00, 1.50000000e-01, 7.50000000e-01,
3.00000000e-01, 1.00000000e-01, 2.00000000e-01,
3.50000000e-01, 8.50000000e-01, 0.00000000e+00,
1.00000000e+00, 9.50000000e-01, 2.50000000e-01,
6.50000000e-01, 8.50000000e-01, 7.00000000e-01,
2.00000000e-01, 6.50000000e-01, 9.00000000e-01,
1.00000000e-01, 3.50000000e-01, 8.50000000e-01,
4.00000000e-01, 6.50000000e-01, 2.50000000e-01,
3.50000000e-01, 8.00000000e-01, 9.00000000e-01,
0.00000000e+00, 5.00000000e-01, 1.00000000e+00])),
lawson25=TestDataSet(
x=np.array([ 0.1375, 0.9125, 0.7125, 0.225 , -0.05 , 0.475 , 0.05 ,
0.45 , 1.0875, 0.5375, -0.0375, 0.1875, 0.7125, 0.85 ,
0.7 , 0.275 , 0.45 , 0.8125, 0.45 , 1. , 0.5 ,
0.1875, 0.5875, 1.05 , 0.1 ]),
y=np.array([ 0.975 , 0.9875 , 0.7625 , 0.8375 , 0.4125 , 0.6375 ,
-0.05 , 1.0375 , 0.55 , 0.8 , 0.75 , 0.575 ,
0.55 , 0.4375 , 0.3125 , 0.425 , 0.2875 , 0.1875 ,
-0.0375 , 0.2625 , 0.4625 , 0.2625 , 0.125 , -0.06125, 0.1125 ])),
random100=TestDataSet(
x=np.array([ 0.0096326, 0.0216348, 0.029836 , 0.0417447, 0.0470462,
0.0562965, 0.0646857, 0.0740377, 0.0873907, 0.0934832,
0.1032216, 0.1110176, 0.1181193, 0.1251704, 0.132733 ,
0.1439536, 0.1564861, 0.1651043, 0.1786039, 0.1886405,
0.2016706, 0.2099886, 0.2147003, 0.2204141, 0.2343715,
0.240966 , 0.252774 , 0.2570839, 0.2733365, 0.2853833,
0.2901755, 0.2964854, 0.3019725, 0.3125695, 0.3307163,
0.3378504, 0.3439061, 0.3529922, 0.3635507, 0.3766172,
0.3822429, 0.3869838, 0.3973137, 0.4170708, 0.4255588,
0.4299218, 0.4372839, 0.4705033, 0.4736655, 0.4879299,
0.494026 , 0.5055324, 0.5162593, 0.5219219, 0.5348529,
0.5483213, 0.5569571, 0.5638611, 0.5784908, 0.586395 ,
0.5929148, 0.5987839, 0.6117561, 0.6252296, 0.6331381,
0.6399048, 0.6488972, 0.6558537, 0.6677405, 0.6814074,
0.6887812, 0.6940896, 0.7061687, 0.7160957, 0.7317445,
0.7370798, 0.746203 , 0.7566957, 0.7699998, 0.7879347,
0.7944014, 0.8164468, 0.8192794, 0.8368405, 0.8500993,
0.8588255, 0.8646496, 0.8792329, 0.8837536, 0.8900077,
0.8969894, 0.9044917, 0.9083947, 0.9203972, 0.9347906,
0.9434519, 0.9490328, 0.9569571, 0.9772067, 0.9983493]),
y=np.array([ 0.3083158, 0.2450434, 0.8613847, 0.0977864, 0.3648355,
0.7156339, 0.5311312, 0.9755672, 0.1781117, 0.5452797,
0.1603881, 0.7837139, 0.9982015, 0.6910589, 0.104958 ,
0.8184662, 0.7086405, 0.4456593, 0.1178342, 0.3189021,
0.9668446, 0.7571834, 0.2016598, 0.3232444, 0.4368583,
0.8907869, 0.064726 , 0.5692618, 0.2947027, 0.4332426,
0.3347464, 0.7436284, 0.1066265, 0.8845357, 0.515873 ,
0.9425637, 0.4799701, 0.1783069, 0.114676 , 0.8225797,
0.2270688, 0.4073598, 0.887508 , 0.7631616, 0.9972804,
0.4959884, 0.3410421, 0.249812 , 0.6409007, 0.105869 ,
0.5411969, 0.0089792, 0.8784268, 0.5515874, 0.4038952,
0.1654023, 0.2965158, 0.3660356, 0.0366554, 0.950242 ,
0.2638101, 0.9277386, 0.5377694, 0.7374676, 0.4674627,
0.9186109, 0.0416884, 0.1291029, 0.6763676, 0.8444238,
0.3273328, 0.1893879, 0.0645923, 0.0180147, 0.8904992,
0.4160648, 0.4688995, 0.2174508, 0.5734231, 0.8853319,
0.8018436, 0.6388941, 0.8931002, 0.1000558, 0.2789506,
0.9082948, 0.3259159, 0.8318747, 0.0508513, 0.970845 ,
0.5120548, 0.2859716, 0.9581641, 0.6183429, 0.3779934,
0.4010423, 0.9478657, 0.7425486, 0.8883287, 0.549675 ])),
uniform9=TestDataSet(
x=np.array([ 1.25000000e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 1.25000000e-01, 1.25000000e-01,
1.25000000e-01, 1.25000000e-01, 1.25000000e-01,
1.25000000e-01, 1.25000000e-01, 1.25000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00]),
y=np.array([ 0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00])),
)
def constant(x, y):
return np.ones(x.shape, x.dtype)
constant.title = 'Constant'
def xramp(x, y):
return x
xramp.title = 'X Ramp'
def yramp(x, y):
return y
yramp.title = 'Y Ramp'
def exponential(x, y):
x = x*9
y = y*9
x1 = x+1.0
x2 = x-2.0
x4 = x-4.0
x7 = x-7.0
    y1 = y+1.0  # corresponds to the (9y + 1) term of Franke's standard test function
y2 = y-2.0
y3 = y-3.0
y7 = y-7.0
f = (0.75 * np.exp(-(x2*x2+y2*y2)/4.0) +
0.75 * np.exp(-x1*x1/49.0 - y1/10.0) +
0.5 * np.exp(-(x7*x7 + y3*y3)/4.0) -
0.2 * np.exp(-x4*x4 -y7*y7))
return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
f = np.tanh(9.0*(y-x) + 1.0)/9.0
return f
cliff.title = 'Cliff'
def saddle(x, y):
f = (1.25 + np.cos(5.4*y))/(6.0 + 6.0*(3*x-1.0)**2)
return f
saddle.title = 'Saddle'
def gentle(x, y):
f = np.exp(-5.0625*((x-0.5)**2+(y-0.5)**2))/3.0
return f
gentle.title = 'Gentle Peak'
def steep(x, y):
f = np.exp(-20.25*((x-0.5)**2+(y-0.5)**2))/3.0
return f
steep.title = 'Steep Peak'
def sphere(x, y):
circle = 64-81*((x-0.5)**2 + (y-0.5)**2)
f = np.where(circle >= 0, np.sqrt(np.clip(circle,0,100)) - 0.5, 0.0)
return f
sphere.title = 'Sphere'
def trig(x, y):
f = 2.0*np.cos(10.0*x)*np.sin(10.0*y) + np.sin(10.0*x*y)
return f
trig.title = 'Cosines and Sines'
def gauss(x, y):
x = 5.0-10.0*x
y = 5.0-10.0*y
g1 = np.exp(-x*x/2)
g2 = np.exp(-y*y/2)
f = g1 + 0.75*g2*(1 + g1)
return f
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
ex = np.exp((10.0-20.0*x)/3.0)
ey = np.exp((10.0-20.0*y)/3.0)
logitx = 1.0/(1.0+ex)
logity = 1.0/(1.0+ey)
f = (((20.0/3.0)**3 * ex*ey)**2 * (logitx*logity)**5 *
(ex-2.0*logitx)*(ey-2.0*logity))
return f
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
circle = np.hypot(80*x-40.0, 90*y-45.)
f = np.exp(-0.04*circle) * np.cos(0.15*circle)
return f
cosine_peak.title = 'Cosine Peak'
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak]
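# Any of these surfaces can be evaluated directly on one of the sample meshes
# defined above, e.g. (illustrative):
#
#   >>> z = exponential(data.franke100.x, data.franke100.y)
#   >>> z.shape
#   (100,)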
class LinearTester(object):
name = 'Linear'
def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250):
self.xrange = xrange
self.yrange = yrange
self.nrange = nrange
self.npoints = npoints
rng = np.random.RandomState(1234567890)
self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
self.tri = Triangulation(self.x, self.y)
def replace_data(self, dataset):
self.x = dataset.x
self.y = dataset.y
self.tri = Triangulation(self.x, self.y)
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.linear_extrapolator(z, bbox=self.xrange+self.yrange)
def plot(self, func, interp=True, plotter='imshow'):
import matplotlib as mpl
from matplotlib import pylab as pl
if interp:
lpi = self.interpolator(func)
z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
else:
y, x = np.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
z = func(x, y)
z = np.where(np.isinf(z), 0.0, z)
extent = (self.xrange[0], self.xrange[1],
self.yrange[0], self.yrange[1])
pl.ioff()
pl.clf()
pl.hot() # Some like it hot
if plotter == 'imshow':
pl.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower')
elif plotter == 'contour':
Y, X = np.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
pl.contour(np.ravel(X), np.ravel(Y), z, 20)
x = self.x
y = self.y
lc = mpl.collections.LineCollection(np.array([((x[i], y[i]), (x[j], y[j]))
for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])
ax = pl.gca()
ax.add_collection(lc)
if interp:
title = '%s Interpolant' % self.name
else:
title = 'Reference'
if hasattr(func, 'title'):
pl.title('%s: %s' % (func.title, title))
else:
pl.title(title)
pl.show()
pl.ion()
class NNTester(LinearTester):
name = 'Natural Neighbors'
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.nn_extrapolator(z, bbox=self.xrange+self.yrange)
def plotallfuncs(allfuncs=allfuncs):
from matplotlib import pylab as pl
pl.ioff()
nnt = NNTester(npoints=1000)
lpt = LinearTester(npoints=1000)
for func in allfuncs:
print func.title
nnt.plot(func, interp=False, plotter='imshow')
pl.savefig('%s-ref-img.png' % func.func_name)
nnt.plot(func, interp=True, plotter='imshow')
pl.savefig('%s-nn-img.png' % func.func_name)
lpt.plot(func, interp=True, plotter='imshow')
pl.savefig('%s-lin-img.png' % func.func_name)
nnt.plot(func, interp=False, plotter='contour')
pl.savefig('%s-ref-con.png' % func.func_name)
nnt.plot(func, interp=True, plotter='contour')
pl.savefig('%s-nn-con.png' % func.func_name)
lpt.plot(func, interp=True, plotter='contour')
pl.savefig('%s-lin-con.png' % func.func_name)
pl.ion()
def plot_dt(tri, colors=None):
import matplotlib as mpl
from matplotlib import pylab as pl
if colors is None:
colors = [(0,0,0,0.2)]
lc = mpl.collections.LineCollection(np.array([((tri.x[i], tri.y[i]), (tri.x[j], tri.y[j]))
for i, j in tri.edge_db]), colors=colors)
ax = pl.gca()
ax.add_collection(lc)
pl.draw_if_interactive()
def plot_vo(tri, colors=None):
import matplotlib as mpl
from matplotlib import pylab as pl
if colors is None:
colors = [(0,1,0,0.2)]
lc = mpl.collections.LineCollection(np.array(
[(tri.circumcenters[i], tri.circumcenters[j])
for i in xrange(len(tri.circumcenters))
for j in tri.triangle_neighbors[i] if j != -1]),
colors=colors)
ax = pl.gca()
ax.add_collection(lc)
pl.draw_if_interactive()
def plot_cc(tri, edgecolor=None):
import matplotlib as mpl
from matplotlib import pylab as pl
if edgecolor is None:
edgecolor = (0,0,1,0.2)
dxy = (np.array([(tri.x[i], tri.y[i]) for i,j,k in tri.triangle_nodes])
- tri.circumcenters)
r = np.hypot(dxy[:,0], dxy[:,1])
ax = pl.gca()
for i in xrange(len(r)):
p = mpl.patches.Circle(tri.circumcenters[i], r[i], resolution=100, edgecolor=edgecolor,
facecolor=(1,1,1,0), linewidth=0.2)
ax.add_patch(p)
pl.draw_if_interactive()
def quality(func, mesh, interpolator='nn', n=33):
"""Compute a quality factor (the quantity r**2 from TOMS792).
interpolator must be in ('linear', 'nn').
"""
fz = func(mesh.x, mesh.y)
tri = Triangulation(mesh.x, mesh.y)
intp = getattr(tri, interpolator+'_extrapolator')(fz, bbox=(0.,1.,0.,1.))
Y, X = np.mgrid[0:1:complex(0,n),0:1:complex(0,n)]
Z = func(X, Y)
iz = intp[0:1:complex(0,n),0:1:complex(0,n)]
#nans = np.isnan(iz)
#numgood = n*n - np.sum(np.array(nans.flat, np.int32))
numgood = n*n
SE = (Z - iz)**2
SSE = np.sum(SE.flat)
meanZ = np.sum(Z.flat) / numgood
SM = (Z - meanZ)**2
SSM = np.sum(SM.flat)
r2 = 1.0 - SSE/SSM
print func.func_name, r2, SSE, SSM, numgood
return r2
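# For instance (illustrative call, mirroring what allquality() below does), the
# natural-neighbour quality factor for the exponential surface on the 100-point
# Franke mesh is obtained with:
#
#   r2 = quality(exponential, data.franke100, interpolator='nn', n=33)
#
# Values close to 1 mean the interpolant reproduces the true surface well.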
def allquality(interpolator='nn', allfuncs=allfuncs, data=data, n=33):
results = {}
kv = data.items()
kv.sort()
for name, mesh in kv:
reslist = results.setdefault(name, [])
for func in allfuncs:
reslist.append(quality(func, mesh, interpolator, n))
return results
def funky():
x0 = np.array([0.25, 0.3, 0.5, 0.6, 0.6])
y0 = np.array([0.2, 0.35, 0.0, 0.25, 0.65])
tx = 0.46
ty = 0.23
t0 = Triangulation(x0, y0)
t1 = Triangulation(np.hstack((x0, [tx])), np.hstack((y0, [ty])))
return t0, t1
| agpl-3.0 |
aewhatley/scikit-learn | sklearn/utils/tests/test_validation.py | 133 | 18339 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2 array with
# one sample and one feature:
X_checked = check_array(42, ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
    assert_raises_regexp(TypeError, r'got <\w+ \'int\'>',
                         check_consistent_length, [1, 2], 1)
    assert_raises_regexp(TypeError, r'got <\w+ \'object\'>',
                         check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
epierson9/multiphenotype_methods | multiphenotype_utils.py | 1 | 8040 | import pandas as pd
import numpy as np
import copy, math, random
import matplotlib.pyplot as plt
from scipy.stats import spearmanr, pearsonr
from scipy.cluster.hierarchy import linkage, dendrogram, fcluster
from scipy.spatial.distance import squareform
def move_last_col_to_first(df):
cols = df.columns.tolist()
cols = cols[-1:] + cols[:-1]
df = df.loc[:, cols]
return df
def compute_correlation_matrix_with_incomplete_data(df, correlation_type):
"""
Given a dataframe or numpy array df and a correlation type (spearman, pearson, or covariance) computes the pairwise correlations between
all columns of the dataframe. Dataframe can have missing data; these will simply be ignored.
Nan correlations are set to 0 with a warning.
Returns the correlation matrix and a vector of counts of non-missing data.
For correlation_type == covariance, identical to np.cov(df.T, ddof = 0) in case of no missing data.
"""
X = copy.deepcopy(pd.DataFrame(df)) # make sure we are using a dataframe to do computations.
assert correlation_type in ['spearman', 'pearson', 'covariance']
X = X.astype(np.float64) # if we do not do this for some reason it ignores some columns in computing the correlation matrix.
# which ends up being the wrong shape.
if correlation_type == 'covariance':
C = X.cov() * (len(df) - 1) / len(df) # need correction factor so it's consistent with ddof = 0. Makes little difference.
else:
C = X.corr(correlation_type)
C = np.array(C)
assert C.shape[0] == C.shape[1]
assert C.shape[0] == len(df.columns)
for i in range(len(C)):
for j in range(len(C)):
if np.isnan(C[i][j]):
print("Warning: entry of covariance matrix is nan; setting to 0.")
C[i][j] = 0
non_missing_data_counts = (~pd.isnull(X)).sum(axis = 0)
return C, non_missing_data_counts
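# A minimal sketch of the incomplete-data behaviour (illustrative values): missing
# entries are ignored pairwise, and the per-column counts of non-missing rows are
# returned alongside the matrix.
#
#   >>> df = pd.DataFrame({'a': [1.0, 2.0, np.nan, 4.0],
#   ...                    'b': [2.0, 4.0, 6.0, 8.0]})
#   >>> C, counts = compute_correlation_matrix_with_incomplete_data(df, 'pearson')
#   >>> C.shape, list(counts)
#   ((2, 2), [3, 4])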
def partition_dataframe_into_binary_and_continuous(df, verbose=False):
"""
Partitions a data frame into binary and continuous features.
This is used for the autoencoder so we apply the correct loss function.
Returns a matrix X of df values along the column indices of binary and continuous features
and the feature names.
"""
#print("Partitioning dataframe into binary and continuous columns")
phenotypes_to_exclude = [
'individual_id',
'age_sex___age']
feature_names = []
binary_features = []
continuous_features = []
for c in df.columns:
if c in phenotypes_to_exclude:
continue
assert len(df[c].dropna()) == len(df)
if set(df[c]) == set([False, True]):
# this binarization should work even if df[c] is eg 1.0 or 1 rather than True.
if verbose:
print("Binary column %s" % c)
binary_features.append(c)
else:
if verbose:
print("Continuous column %s" % c)
continuous_features.append(c)
feature_names.append(c)
binary_feature_idxs = [feature_names.index(a) for a in binary_features]
continuous_feature_idxs = [feature_names.index(a) for a in continuous_features]
X = df[feature_names].values
return X, binary_feature_idxs, continuous_feature_idxs, feature_names
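# A small illustration (column names are hypothetical; only the True/False test
# above decides what counts as binary):
#
#   >>> df = pd.DataFrame({'individual_id': [1, 2],
#   ...                    'smoker': [True, False],
#   ...                    'bmi': [21.5, 30.2]})
#   >>> X, bin_idxs, cont_idxs, names = partition_dataframe_into_binary_and_continuous(df)
#   >>> names, bin_idxs, cont_idxs
#   (['smoker', 'bmi'], [0], [1])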
def compute_column_means_with_incomplete_data(df):
"""
Given a dataframe or numpy array df, computes means for each column.
Identical to np.array(data.df).mean(axis = 0) in case of no missing data.
"""
X = np.array(df)
return np.nanmean(X, axis = 0)
def cluster_and_plot_correlation_matrix(C, column_names, how_to_sort):
"""
Given a correlation matrix c and column_names, sorts correlation matrix using hierarchical clustering if
how_to_sort == hierarchical, otherwise alphabetically.
"""
C = copy.deepcopy(C)
if np.abs(C).max() - 1 > 1e-6:
print("Warning: maximum absolute value in C is %2.3f, which is larger than 1; this will be truncated in the visualization." % np.abs(C).max())
for i in range(len(C)):
if(np.abs(C[i, i] - 1) > 1e-6):
print("Warning: correlation matrix diagonal entry is not one (%2.8f); setting to one for visualization purposes." % C[i, i].mean())
C[i, i] = 1 # make it exactly one so hierarchical clustering doesn't complain.
C[C > 1] = 1
C[C < -1] = -1
assert how_to_sort in ['alphabetically', 'hierarchical']
assert(len(C) == len(column_names))
if how_to_sort == 'hierarchical':
y = squareform(1 - np.abs(C))
Z = linkage(y, method = 'average')
clusters = fcluster(Z, t = 0)
# print(clusters)
reordered_idxs = np.argsort(clusters)
else:
reordered_idxs = np.argsort(column_names)
C = C[:, reordered_idxs]
C = C[reordered_idxs, :]
plt.figure(figsize=[50, 50])
plt.set_cmap('bwr')
plt.imshow(C, vmin = -1, vmax = 1)
reordered_colnames = np.array(column_names)[reordered_idxs]
plt.yticks(range(len(column_names)),
reordered_colnames,
fontsize = 24)
plt.xticks(range(len(column_names)),
reordered_colnames,
rotation = 90,
fontsize = 24)
plt.colorbar()
for i in range(len(C)):
for j in range(len(C)):
if np.abs(C[i][j]) > .1:
plt.scatter([i], [j], color = 'black', s = 1)
plt.show()
def get_continuous_features_as_matrix(df, return_cols=False):
X, binary_feature_idxs, continuous_feature_idxs, feature_names = partition_dataframe_into_binary_and_continuous(df)
X_continuous = X[:, continuous_feature_idxs]
continuous_feature_names = [feature_names[idx] for idx in continuous_feature_idxs]
# Sanity checks
sanity_check_non_continuous_phenotypes = [
'individual_id',
'age_sex___age',
'age_sex___self_report_female']
for phenotype in sanity_check_non_continuous_phenotypes:
assert phenotype not in continuous_feature_names
if return_cols:
return X_continuous, continuous_feature_names
else:
return X_continuous
def assert_zero_mean(df):
print(np.mean(get_continuous_features_as_matrix(df), axis=0))
assert np.all(np.mean(get_continuous_features_as_matrix(df), axis=0) < 1e-8)
def add_id(Z, df_with_id):
"""
Takes in a matrix Z and data frame df_with_id
and converts Z into a data frame with individual_id taken from df_with_id.
Assumes that rows of Z are aligned with rows of df_with_id.
"""
assert Z.shape[0] == df_with_id.shape[0]
assert 'individual_id' in df_with_id.columns
results_df = pd.DataFrame(Z)
results_df.index = list(df_with_id.index) # make sure the two dataframes have the same index.
results_df.loc[:, 'individual_id'] = df_with_id.loc[:, 'individual_id'].values # similarly with individual id.
results_df = move_last_col_to_first(results_df)
return results_df
def remove_id_and_get_mat(Z_df):
assert Z_df.columns[0] == 'individual_id'
return Z_df.drop('individual_id', axis=1).values
def make_age_bins(bin_size=1, lower=40, upper=69):
"""
Returns bins such that np.digitize(x, bins) does the right thing.
"""
bins = np.arange(lower, upper+1, bin_size)
bins = np.append(bins, upper+1)
print(bins)
return bins
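# For example, with the defaults above make_age_bins(bin_size=10) returns
# array([40, 50, 60, 70]): decade edges plus a closing edge at upper + 1, which is
# the form np.digitize expects.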
def divide_idxs_into_batches(idxs, batch_size):
"""
Given a list of idxs and a batch size, divides into batches.
"""
n_examples = len(idxs)
n_batches = math.ceil(n_examples / batch_size)
batches = []
for i in range(n_batches):
start = i * batch_size
end = start + batch_size
batches.append(idxs[start:end])
return batches
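# A quick worked example (illustrative): seven indices with batch_size=3 give
# three batches, the last one short.
#
#   >>> divide_idxs_into_batches(list(range(7)), 3)
#   [[0, 1, 2], [3, 4, 5], [6]]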
| mit |
davemccormick/pyAnimalTrack | src/pyAnimalTrack/backend/filehandlers/filtered_sensor_data.py | 1 | 2015 | import pandas as pd
from pyAnimalTrack.backend.filehandlers.input_data import InputData
class FilteredSensorData(InputData):
def __init__(self, filter_class, df, filter_parameters):
""" Constructor
:param filter_class: The filter to use
:param df: The Pandas dataframe to filter
            :param filter_parameters: Per-column dict of filter settings; each entry
                                      provides 'SampleRate' (in Hz), 'CutoffFrequency'
                                      and 'FilterLength' for that column's filter
:returns: void
"""
super(FilteredSensorData, self).__init__()
self.__df = df.copy()
# TODO: Remove this duplication of names - potentially into input_data, or a new parent class?
self.__names =['ms','ax','ay','az','mx','my','mz','gx','gy','gz','temp','adjms']
self.__readableNames = ['Milliseconds', 'AX', 'AY', 'AZ', 'MX', 'MY', 'MZ', 'GX', 'GY', 'GZ', 'Temperature', 'Adjusted Milliseconds']
filtered_names = self.__names[1:-2]
new_values = {}
# We need to filter the data, with the provided parameters
for column in range(0, len(self.__names)):
curr_name = self.__names[column]
# Only run the filter on the columns that require it
if curr_name in filtered_names:
# Create a new column of data
new_values[curr_name] = filter_class(getattr(self.__df, curr_name).values)\
.filter(
filter_parameters[curr_name]['SampleRate'],
filter_parameters[curr_name]['CutoffFrequency'],
filter_parameters[curr_name]['FilterLength']
)
else:
# Otherwise, just copy the value
new_values[curr_name] = getattr(self.__df, curr_name).values
self.__df = pd.DataFrame(new_values)
def getData(self):
return self.__df
def getColumns(self):
return self.__names
def getReadableColumns(self):
return self.__readableNames
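# A minimal usage sketch. LowPassFilter and the parameter values are placeholders;
# any class whose instances are built from a value array and expose
# .filter(sample_rate, cutoff_frequency, filter_length), as used in __init__ above,
# would work, and raw_df must contain the twelve columns listed in self.__names.
#
#   filter_parameters = {col: {'SampleRate': 10, 'CutoffFrequency': 2, 'FilterLength': 59}
#                        for col in ['ax', 'ay', 'az', 'mx', 'my', 'mz', 'gx', 'gy', 'gz']}
#   filtered = FilteredSensorData(LowPassFilter, raw_df, filter_parameters)
#   smoothed_df = filtered.getData()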
| gpl-3.0 |
zxsted/scipy | doc/source/tutorial/stats/plots/kde_plot3.py | 132 | 1229 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
np.random.seed(12456)
x1 = np.random.normal(size=200) # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12) # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
x2 = stats.t.rvs(5, size=200) # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12) # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
plt.show()
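# For reference (standard gaussian_kde formulas, stated here as a reminder rather
# than derived from this script): with n samples in d dimensions the Scott factor
# is n**(-1/(d+4)) and the Silverman factor is (n*(d+2)/4)**(-1/(d+4)); for the 200
# one-dimensional samples above that is roughly 0.35 and 0.37 respectively.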
| bsd-3-clause |
intel-analytics/analytics-zoo | pyzoo/zoo/automl/model/base_pytorch_model.py | 1 | 15238 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from torch.utils.data import TensorDataset, DataLoader
import types
from zoo.automl.model.abstract import BaseModel, ModelBuilder
from zoo.automl.common.util import *
from zoo.automl.common.metrics import Evaluator
import pandas as pd
from zoo.orca.automl.pytorch_utils import LR_NAME, DEFAULT_LR
PYTORCH_REGRESSION_LOSS_MAP = {"mse": "MSELoss",
"mae": "L1Loss",
"huber_loss": "SmoothL1Loss"}
class PytorchBaseModel(BaseModel):
def __init__(self, model_creator, optimizer_creator, loss_creator,
check_optional_config=False):
self.check_optional_config = check_optional_config
self.model_creator = model_creator
self.optimizer_creator = optimizer_creator
self.loss_creator = loss_creator
self.config = None
self.model = None
self.model_built = False
self.onnx_model = None
self.onnx_model_built = False
def _create_loss(self):
if isinstance(self.loss_creator, torch.nn.modules.loss._Loss):
self.criterion = self.loss_creator
else:
self.criterion = self.loss_creator(self.config)
def _create_optimizer(self):
import types
if isinstance(self.optimizer_creator, types.FunctionType):
self.optimizer = self.optimizer_creator(self.model, self.config)
else:
            # use torch default parameter values if the user passes an optimizer name or optimizer class.
try:
self.optimizer = self.optimizer_creator(self.model.parameters(),
lr=self.config.get(LR_NAME, DEFAULT_LR))
            except Exception:
raise ValueError("We failed to generate an optimizer with specified optim "
"class/name. You need to pass an optimizer creator function.")
def build(self, config):
# check config and update
self._check_config(**config)
self.config = config
# build model
if "selected_features" in config:
config["input_feature_num"] = len(config['selected_features'])\
+ config['output_feature_num']
self.model = self.model_creator(config)
if not isinstance(self.model, torch.nn.Module):
raise ValueError("You must create a torch model in model_creator")
self.model_built = True
self._create_loss()
self._create_optimizer()
def _reshape_input(self, x):
if x.ndim == 1:
x = x.reshape(-1, 1)
return x
def _np_to_creator(self, data):
def data_creator(config):
x, y = PytorchBaseModel.covert_input(data)
x = self._reshape_input(x)
y = self._reshape_input(y)
return DataLoader(TensorDataset(x, y),
batch_size=int(config["batch_size"]),
shuffle=True)
return data_creator
def fit_eval(self, data, validation_data=None, mc=False, verbose=0, epochs=1, metric=None,
metric_func=None,
**config):
"""
        :param data: either a tuple of numpy ndarrays in the form (x, y), or a
               data creator that takes a config dict and returns a
               torch.utils.data.DataLoader yielding torch.Tensor batches.
        :param validation_data: either a tuple of numpy ndarrays in the form (x, y),
               or a data creator that takes a config dict and returns a
               torch.utils.data.DataLoader yielding torch.Tensor batches.
        fit_eval builds the model the first time it is called; on later calls the
        config is updated, but only non-model-architecture params take effect.
TODO: check the updated params and decide if the model is needed to be rebuilt
"""
# todo: support input validation data None
assert validation_data is not None, "You must input validation data!"
if not metric:
raise ValueError("You must input a valid metric value for fit_eval.")
# update config settings
def update_config():
if not isinstance(data, types.FunctionType):
x = self._reshape_input(data[0])
y = self._reshape_input(data[1])
config.setdefault("past_seq_len", x.shape[-2])
config.setdefault("future_seq_len", y.shape[-2])
config.setdefault("input_feature_num", x.shape[-1])
config.setdefault("output_feature_num", y.shape[-1])
if not self.model_built:
update_config()
self.build(config)
else:
tmp_config = self.config.copy()
tmp_config.update(config)
self._check_config(**tmp_config)
self.config.update(config)
# get train_loader and validation_loader
if isinstance(data, types.FunctionType):
train_loader = data(self.config)
validation_loader = validation_data(self.config)
else:
            assert isinstance(data, tuple) and isinstance(validation_data, tuple),\
                f"data/validation_data should be a tuple or a " \
                f"data creator function but found {type(data)}"
            assert isinstance(data[0], np.ndarray) and isinstance(validation_data[0], np.ndarray),\
                f"x should be a np.ndarray but found {type(data[0])}"
            assert isinstance(data[1], np.ndarray) and isinstance(validation_data[1], np.ndarray),\
                f"y should be a np.ndarray but found {type(data[1])}"
train_data_creator = self._np_to_creator(data)
valid_data_creator = self._np_to_creator(validation_data)
train_loader = train_data_creator(self.config)
validation_loader = valid_data_creator(self.config)
epoch_losses = []
for i in range(epochs):
train_loss = self._train_epoch(train_loader)
epoch_losses.append(train_loss)
train_stats = {"loss": np.mean(epoch_losses), "last_loss": epoch_losses[-1]}
val_stats = self._validate(validation_loader, metric_name=metric, metric_func=metric_func)
self.onnx_model_built = False
return val_stats
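    # A usage sketch (names are placeholders): fit_eval accepts raw arrays, e.g.
    #
    #   stats = model.fit_eval((x_train, y_train), (x_val, y_val),
    #                          epochs=5, metric="mse", batch_size=32)
    #
    # or, equivalently, creator functions of the config that return DataLoaders.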
@staticmethod
def to_torch(inp):
if isinstance(inp, np.ndarray):
return torch.from_numpy(inp)
if isinstance(inp, (pd.DataFrame, pd.Series)):
return torch.from_numpy(inp.values)
return inp
@staticmethod
def covert_input(data):
x = PytorchBaseModel.to_torch(data[0]).float()
y = PytorchBaseModel.to_torch(data[1]).float()
return x, y
def _train_epoch(self, train_loader):
self.model.train()
total_loss = 0
batch_idx = 0
tqdm = None
try:
from tqdm import tqdm
pbar = tqdm(total=len(train_loader))
except ImportError:
pass
for x_batch, y_batch in train_loader:
self.optimizer.zero_grad()
yhat = self._forward(x_batch, y_batch)
loss = self.criterion(yhat, y_batch)
loss.backward()
self.optimizer.step()
total_loss += loss.item()
batch_idx += 1
if tqdm:
pbar.set_description("Loss: {}".format(loss.item()))
pbar.update(1)
if tqdm:
pbar.close()
train_loss = total_loss/batch_idx
return train_loss
def _forward(self, x, y):
return self.model(x)
def _validate(self, validation_loader, metric_name, metric_func=None):
if not metric_name:
assert metric_func, "You must input valid metric_func or metric_name"
metric_name = metric_func.__name__
self.model.eval()
with torch.no_grad():
yhat_list = []
y_list = []
for x_valid_batch, y_valid_batch in validation_loader:
yhat_list.append(self.model(x_valid_batch).numpy())
y_list.append(y_valid_batch.numpy())
yhat = np.concatenate(yhat_list, axis=0)
y = np.concatenate(y_list, axis=0)
# val_loss = self.criterion(yhat, y)
if metric_func:
eval_result = metric_func(y, yhat)
else:
eval_result = Evaluator.evaluate(metric=metric_name,
y_true=y, y_pred=yhat,
multioutput='uniform_average')
return {metric_name: eval_result}
def _print_model(self):
# print model and parameters
print(self.model)
print(len(list(self.model.parameters())))
for i in range(len(list(self.model.parameters()))):
print(list(self.model.parameters())[i].size())
def evaluate(self, x, y, metrics=['mse'], multioutput="raw_values"):
# reshape 1dim input
x = self._reshape_input(x)
y = self._reshape_input(y)
yhat = self.predict(x)
eval_result = [Evaluator.evaluate(m, y_true=y, y_pred=yhat, multioutput=multioutput)
for m in metrics]
return eval_result
def predict(self, x, mc=False, batch_size=32):
# reshape 1dim input
x = self._reshape_input(x)
if not self.model_built:
raise RuntimeError("You must call fit_eval or restore first before calling predict!")
x = PytorchBaseModel.to_torch(x).float()
if mc:
self.model.train()
else:
self.model.eval()
test_loader = DataLoader(TensorDataset(x),
batch_size=int(batch_size))
yhat_list = []
for x_test_batch in test_loader:
yhat_list.append(self.model(x_test_batch[0]).detach().numpy())
yhat = np.concatenate(yhat_list, axis=0)
return yhat
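    # Note on predict_with_uncertainty (below): it calls predict(mc=True),
    # which keeps the model in train() mode so that dropout stays active, and
    # repeats the stochastic forward pass n_iter times (Monte-Carlo dropout).
    # The mean over passes is returned as the prediction and the standard
    # deviation as the uncertainty estimate.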
def predict_with_uncertainty(self, x, n_iter=100):
result = np.zeros((n_iter,) + (x.shape[0], self.config["output_feature_num"]))
for i in range(n_iter):
result[i, :, :] = self.predict(x, mc=True)
prediction = result.mean(axis=0)
uncertainty = result.std(axis=0)
return prediction, uncertainty
def state_dict(self):
state = {
"config": self.config,
"model": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
return state
def load_state_dict(self, state):
self.config = state["config"]
self.model = self.model_creator(self.config)
self.model.load_state_dict(state["model"])
self.model_built = True
self._create_optimizer()
self.optimizer.load_state_dict(state["optimizer"])
self._create_loss()
def save(self, checkpoint):
if not self.model_built:
raise RuntimeError("You must call fit_eval or restore first before calling save!")
state_dict = self.state_dict()
torch.save(state_dict, checkpoint)
def restore(self, checkpoint):
state_dict = torch.load(checkpoint)
self.load_state_dict(state_dict)
def evaluate_with_onnx(self, x, y, metrics=['mse'], dirname=None, multioutput="raw_values"):
# reshape 1dim input
x = self._reshape_input(x)
y = self._reshape_input(y)
yhat = self.predict_with_onnx(x, dirname=dirname)
eval_result = [Evaluator.evaluate(m, y_true=y, y_pred=yhat, multioutput=multioutput)
for m in metrics]
return eval_result
def _build_onnx(self, x, dirname=None):
        if not self.model_built:
            raise RuntimeError("You must call fit_eval or restore "
                               "first before calling onnx methods!")
        try:
            import onnx
            import onnxruntime
        except ImportError:
            raise RuntimeError("You should install onnx and onnxruntime to use onnx based method.")
if dirname is None:
dirname = tempfile.mkdtemp(prefix="onnx_cache_")
# code adapted from
# https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html
torch.onnx.export(self.model,
x,
os.path.join(dirname, "cache.onnx"),
export_params=True,
opset_version=10,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
self.onnx_model = onnx.load(os.path.join(dirname, "cache.onnx"))
onnx.checker.check_model(self.onnx_model)
self.ort_session = onnxruntime.InferenceSession(os.path.join(dirname, "cache.onnx"))
self.onnx_model_built = True
def predict_with_onnx(self, x, mc=False, dirname=None):
# reshape 1dim input
x = self._reshape_input(x)
x = PytorchBaseModel.to_torch(x).float()
if not self.onnx_model_built:
self._build_onnx(x[0:1], dirname=dirname)
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
ort_inputs = {self.ort_session.get_inputs()[0].name: to_numpy(x)}
ort_outs = self.ort_session.run(None, ort_inputs)
return ort_outs[0]
def _get_required_parameters(self):
return {}
def _get_optional_parameters(self):
return {"batch_size",
LR_NAME,
"dropout",
"optim",
"loss"
}
class PytorchModelBuilder(ModelBuilder):
def __init__(self, model_creator,
optimizer_creator,
loss_creator):
from zoo.orca.automl.pytorch_utils import validate_pytorch_loss, validate_pytorch_optim
self.model_creator = model_creator
optimizer = validate_pytorch_optim(optimizer_creator)
self.optimizer_creator = optimizer
loss = validate_pytorch_loss(loss_creator)
self.loss_creator = loss
def build(self, config):
model = PytorchBaseModel(self.model_creator,
self.optimizer_creator,
self.loss_creator)
model.build(config)
return model
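# Sketch of the intended flow (the config contents are illustrative only):
#     builder = PytorchModelBuilder(model_creator, optimizer_creator, loss_creator)
#     model = builder.build(config)   # returns a PytorchBaseModel ready for fit_eval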
| apache-2.0 |
hsiaoyi0504/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
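    # For n_subsamples = 2 and very large n_samples, the breakdown point
    # approaches 1 - 1/sqrt(2) (~0.293), which is the value checked below.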
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
robbymeals/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
alexeyum/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 127 | 1270 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
plt.figure('scikit-learn parallel %s benchmark results' % func.__name__)
plt.plot(sample_sizes, one_core, label="one core")
plt.plot(sample_sizes, multi_core, label="multi core")
plt.xlabel('n_samples')
plt.ylabel('Time (s)')
plt.title('Parallel %s' % func.__name__)
plt.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()
| bsd-3-clause |
ERA-URBAN/hurp | hurp/Traffic2wrfchemi_v4.py | 1 | 22560 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import netCDF4 as nc
import os
from sys import platform
def traffic2wrfchemi(datapath, wrfchemipath, month, day):
# SNAP 1 2 3 4 5 6 7 8 9 10 11 12 13 14
TP_moy = dict() # Month of Year
TP_dow = dict() # Day of Week
TP_hod = dict() # Hour of Day
TP_moy['Jan'] = np.array([1.000, 1.060, 0.950, 1.060, 1.000, 1.200, 0.880, 0.450, 1.000, 1.060, 1.200, 1.000, 0.880, 0.880])
TP_moy['Feb'] = np.array([1.000, 1.047, 0.960, 1.047, 1.000, 1.150, 0.920, 1.300, 1.000, 1.047, 1.200, 1.000, 0.920, 0.920])
TP_moy['Mar'] = np.array([1.000, 1.035, 1.020, 1.035, 1.000, 1.050, 0.980, 2.350, 1.000, 1.035, 1.200, 1.000, 0.980, 0.980])
TP_moy['Apr'] = np.array([1.000, 1.010, 1.000, 1.010, 1.000, 1.000, 1.030, 1.700, 1.000, 1.010, 0.800, 1.000, 1.030, 1.030])
TP_moy['May'] = np.array([1.000, 0.985, 1.010, 0.985, 1.000, 0.900, 1.050, 0.850, 1.000, 0.985, 0.800, 1.000, 1.050, 1.050])
TP_moy['Jun'] = np.array([1.000, 0.960, 1.030, 0.960, 1.000, 0.850, 1.060, 0.850, 1.000, 0.960, 0.800, 1.000, 1.060, 1.060])
TP_moy['Jul'] = np.array([1.000, 0.965, 1.030, 0.965, 1.000, 0.800, 1.010, 0.850, 1.000, 0.965, 0.800, 1.000, 1.010, 1.010])
TP_moy['Aug'] = np.array([1.000, 0.935, 1.010, 0.935, 1.000, 0.875, 1.020, 1.000, 1.000, 0.935, 0.800, 1.000, 1.020, 1.020])
TP_moy['Sep'] = np.array([1.000, 0.995, 1.040, 0.995, 1.000, 0.950, 1.060, 1.100, 1.000, 0.995, 0.800, 1.000, 1.060, 1.060])
TP_moy['Oct'] = np.array([1.000, 1.010, 1.030, 1.010, 1.000, 1.000, 1.050, 0.650, 1.000, 1.010, 1.200, 1.000, 1.050, 1.050])
TP_moy['Nov'] = np.array([1.000, 1.023, 1.010, 1.023, 1.000, 1.075, 1.010, 0.450, 1.000, 1.023, 1.200, 1.000, 1.010, 1.010])
TP_moy['Dec'] = np.array([1.000, 0.975, 0.910, 0.975, 1.000, 1.150, 0.930, 0.450, 1.000, 0.975, 1.200, 1.000, 0.930, 0.930])
TP_dow['Mon'] = np.array([1.000, 1.050, 1.200, 1.050, 1.000, 1.060, 1.020, 1.000, 1.000, 1.050, 1.000, 1.000, 1.000, 1.020])
TP_dow['Tue'] = np.array([1.000, 1.050, 1.200, 1.050, 1.000, 1.060, 1.060, 1.000, 1.000, 1.050, 1.000, 1.000, 1.000, 1.060])
TP_dow['Wed'] = np.array([1.000, 1.050, 1.200, 1.050, 1.000, 1.060, 1.080, 1.000, 1.000, 1.050, 1.000, 1.000, 1.000, 1.080])
TP_dow['Thu'] = np.array([1.000, 1.050, 1.200, 1.050, 1.000, 1.060, 1.100, 1.000, 1.000, 1.050, 1.000, 1.000, 1.000, 1.100])
TP_dow['Fri'] = np.array([1.000, 1.050, 1.200, 1.050, 1.000, 1.060, 1.140, 1.000, 1.000, 1.050, 1.000, 1.000, 1.000, 1.140])
TP_dow['Sat'] = np.array([1.000, 0.910, 0.500, 0.910, 1.000, 0.850, 0.810, 1.000, 1.000, 0.910, 1.000, 1.000, 1.000, 0.810])
TP_dow['Sun'] = np.array([1.000, 0.840, 0.500, 0.840, 1.000, 0.850, 0.790, 1.000, 1.000, 0.840, 1.000, 1.000, 1.000, 0.790])
TP_hod['00'] = np.array([1.000, 0.875, 0.500, 0.875, 1.000, 0.790, 0.190, 0.600, 1.000, 0.875, 1.000, 1.000, 1.000, 0.190])
TP_hod['01'] = np.array([1.000, 0.875, 0.350, 0.875, 1.000, 0.720, 0.090, 0.600, 1.000, 0.875, 1.000, 1.000, 1.000, 0.090])
TP_hod['02'] = np.array([1.000, 0.890, 0.200, 0.890, 1.000, 0.720, 0.060, 0.600, 1.000, 0.890, 1.000, 1.000, 1.000, 0.060])
TP_hod['03'] = np.array([1.000, 0.910, 0.100, 0.910, 1.000, 0.710, 0.050, 0.600, 1.000, 0.910, 1.000, 1.000, 1.000, 0.050])
TP_hod['04'] = np.array([1.000, 0.940, 0.100, 0.940, 1.000, 0.740, 0.090, 0.600, 1.000, 0.940, 1.000, 1.000, 1.000, 0.090])
TP_hod['05'] = np.array([1.000, 0.975, 0.200, 0.975, 1.000, 0.800, 0.220, 0.650, 1.000, 0.975, 1.000, 1.000, 1.000, 0.220])
TP_hod['06'] = np.array([1.000, 1.010, 0.750, 1.010, 1.000, 0.920, 0.860, 0.750, 1.000, 1.010, 1.000, 1.000, 1.000, 0.860])
TP_hod['07'] = np.array([1.000, 1.045, 1.250, 1.045, 1.000, 1.080, 1.840, 0.900, 1.000, 1.045, 1.000, 1.000, 1.000, 1.840])
TP_hod['08'] = np.array([1.000, 1.080, 1.400, 1.080, 1.000, 1.190, 1.860, 1.100, 1.000, 1.080, 1.000, 1.000, 1.000, 1.860])
TP_hod['09'] = np.array([1.000, 1.110, 1.500, 1.110, 1.000, 1.220, 1.410, 1.350, 1.000, 1.110, 1.000, 1.000, 1.000, 1.410])
TP_hod['10'] = np.array([1.000, 1.140, 1.500, 1.140, 1.000, 1.210, 1.240, 1.450, 1.000, 1.140, 1.000, 1.000, 1.000, 1.240])
TP_hod['11'] = np.array([1.000, 1.150, 1.500, 1.150, 1.000, 1.210, 1.200, 1.600, 1.000, 1.150, 1.000, 1.000, 1.000, 1.200])
TP_hod['12'] = np.array([1.000, 1.110, 1.500, 1.110, 1.000, 1.170, 1.320, 1.650, 1.000, 1.110, 1.000, 1.000, 1.000, 1.320])
TP_hod['13'] = np.array([1.000, 1.120, 1.500, 1.120, 1.000, 1.150, 1.440, 1.750, 1.000, 1.120, 1.000, 1.000, 1.000, 1.440])
TP_hod['14'] = np.array([1.000, 1.125, 1.500, 1.125, 1.000, 1.140, 1.450, 1.700, 1.000, 1.125, 1.000, 1.000, 1.000, 1.450])
TP_hod['15'] = np.array([1.000, 1.080, 1.500, 1.080, 1.000, 1.130, 1.590, 1.550, 1.000, 1.080, 1.000, 1.000, 1.000, 1.590])
TP_hod['16'] = np.array([1.000, 1.040, 1.500, 1.040, 1.000, 1.100, 2.030, 1.350, 1.000, 1.040, 1.000, 1.000, 1.000, 2.030])
TP_hod['17'] = np.array([1.000, 1.005, 1.400, 1.005, 1.000, 1.070, 2.080, 1.100, 1.000, 1.005, 1.000, 1.000, 1.000, 2.080])
TP_hod['18'] = np.array([1.000, 0.975, 1.250, 0.975, 1.000, 1.040, 1.510, 0.900, 1.000, 0.975, 1.000, 1.000, 1.000, 1.510])
TP_hod['19'] = np.array([1.000, 0.950, 1.100, 0.950, 1.000, 1.020, 1.060, 0.750, 1.000, 0.950, 1.000, 1.000, 1.000, 1.060])
TP_hod['20'] = np.array([1.000, 0.925, 1.000, 0.925, 1.000, 1.020, 0.740, 0.650, 1.000, 0.925, 1.000, 1.000, 1.000, 0.740])
TP_hod['21'] = np.array([1.000, 0.905, 0.900, 0.905, 1.000, 1.010, 0.620, 0.600, 1.000, 0.905, 1.000, 1.000, 1.000, 0.620])
TP_hod['22'] = np.array([1.000, 0.890, 0.800, 0.890, 1.000, 0.960, 0.610, 0.600, 1.000, 0.890, 1.000, 1.000, 1.000, 0.610])
TP_hod['23'] = np.array([1.000, 0.875, 0.700, 0.875, 1.000, 0.880, 0.440, 0.600, 1.000, 0.875, 1.000, 1.000, 1.000, 0.440])
# define months/days of week
moys = ('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec')
dows = ('Mon','Tue','Wed','Thu','Fri','Sat','Sun')
infilename1 = '%s/resulting_intensities_onGrid_binnenstad.csv'%datapath
infilename2 = '%s/resulting_intensities_onGrid_snelweg.csv'%datapath
infilename3 = '%s/NEI_wrfchemi_00z_d04_%s_%s'%(wrfchemipath,moys[month-1], dows[day])
if not 'progress' in globals(): progress = list()
if not 'dataloaded' in progress:
#--- load grid from existing wrfchemi file
print('Loading grid data from %s'%infilename3)
ncfile = nc.Dataset(infilename3,'r')
xlon = ncfile.variables['XLONG'][:]
xlat = ncfile.variables['XLAT'] [:]
ncfile.close()
# --- initialise traffic intensity matrices
I_city_L = xlat * 0 # veh/24h
I_city_M = xlat * 0 # veh/24h
I_city_H = xlat * 0 # veh/24h
I_ring_L = xlat * 0 # veh/24h
I_ring_M = xlat * 0 # veh/24h
I_ring_H = xlat * 0 # veh/24h
#--- load traffic intensities for city
print('Loading traffic intensity data from %s'%infilename1)
infile = open(infilename1,'r')
iline = -1
for line in infile:
iline = iline+1
if iline > 1:
#,lat,lon,gridId,STRAATNAAM,KM_LV24_x,KM_MV24_x,KM_ZV24_x
words = line.split(',')
if not words[5]: words[5] = 0
if not words[6]: words[6] = 0
if(not words[7] or (words[7] == '\r\n')): words[7] = 0
order = np.int (words[0])
lat = np.float(words[1])
lon = np.float(words[2])
gridID = np.int (words[3])
NAME = words[4]
LVDU_x = np.float(words[5]) # light veh km's driven in grid cell in 24h
MVDU_x = np.float(words[6]) # middle veh km's driven in grid cell in 24h
ZVDU_x = np.float(words[7]) # heavy veh km's driven in grid cell in 24h
distance = np.square(xlon-lon) + np.square(xlat-lat)
iy,ix = np.where( distance == distance.min() )
# if distance[iy,ix] > 1e-10:
# print('(lat,lon) = (%12.10f,%12.10f),(xlat,xlon) = (%12.10f,%12.10f)'%(lat,lon,xlat[iy,ix],xlon[iy,ix]))
# print('(dlat,dlon)=(%f,%f)'%(xlat[iy,ix]-lat,xlon[iy,ix]-lon))
if I_city_L[iy,ix] > 0: print('warning: I_city_L[%3d,%3d] is not empty!'%(iy,ix))
if I_city_M[iy,ix] > 0: print('warning: I_city_M[%3d,%3d] is not empty!'%(iy,ix))
if I_city_H[iy,ix] > 0: print('warning: I_city_H[%3d,%3d] is not empty!'%(iy,ix))
I_city_L[iy,ix] = I_city_L[iy,ix] + LVDU_x
I_city_M[iy,ix] = I_city_M[iy,ix] + MVDU_x
I_city_H[iy,ix] = I_city_H[iy,ix] + ZVDU_x
infile.close()
words = ''
#--- load traffic intensities for ring
print('Loading traffic intensity data from %s'%infilename2)
infile = open(infilename2,'r')
iline = -1
for line in infile:
iline = iline+1
if iline > 1:
#city: ,lat,lon,gridId,NAME, LVDU_x,MVDU_x,ZVDU_x,Shape_Length_x
#ring: ,lat,lon,gridId,STRAATNAAM,LVDU_x,MVDU_x,ZVDU_x
words = line.split(',')
                print(words[7])
words[7] = words[7].rstrip()
if not words[5] : words[5] = 0
if not words[6] : words[6] = 0
if (not words[7] or (words[7] == '\r\n')): words[7] = 0
order = np.int (words[0])
lat = np.float(words[1])
lon = np.float(words[2])
gridID = np.int (words[3])
NAME = words[4]
LVDU_x = np.float(words[5]) # light veh km's driven in grid cell in 24h
MVDU_x = np.float(words[6]) # middle veh km's driven in grid cell in 24h
ZVDU_x = np.float(words[7]) # heavy veh km's driven in grid cell in 24h
distance = np.square(xlon-lon) + np.square(xlat-lat)
iy,ix = np.where( distance == distance.min() )
# if distance[iy,ix] > 1e-10:
# print('(lat,lon) = (%12.10f,%12.10f),(xlat,xlon) = (%12.10f,%12.10f)'%(lat,lon,xlat[iy,ix],xlon[iy,ix]))
# print('(dlat,dlon)=(%f,%f)'%(xlat[iy,ix]-lat,xlon[iy,ix]-lon))
if I_ring_L[iy,ix] > 0: print('warning: I_ring_L[%3d,%3d] is not empty!'%(iy,ix))
if I_ring_M[iy,ix] > 0: print('warning: I_ring_M[%3d,%3d] is not empty!'%(iy,ix))
if I_ring_H[iy,ix] > 0: print('warning: I_ring_H[%3d,%3d] is not empty!'%(iy,ix))
I_ring_L[iy,ix] = I_ring_L[iy,ix] + LVDU_x
I_ring_M[iy,ix] = I_ring_M[iy,ix] + MVDU_x
I_ring_H[iy,ix] = I_ring_H[iy,ix] + ZVDU_x
infile.close()
progress.append('dataloaded')
#--- Emissions
g2mole_NOx = 1./46. # M_NO2 = 46 g/mole
g2kg = 0.001
day2hr = 1/24.
A = 0.1 * 0.1 # km2
    f_city_L_NOx = 0.35 * g2mole_NOx   # g/km/veh NOx as NO2 equivalents; urban roads, normal flow, 2017; light vehicles
    f_city_M_NOx = 5.63 * g2mole_NOx   # g/km/veh NOx as NO2 equivalents; urban roads, normal flow, 2017; medium-heavy vehicles
    f_city_H_NOx = 6.78 * g2mole_NOx   # g/km/veh NOx as NO2 equivalents; urban roads, normal flow, 2017; heavy vehicles
    f_city_L_PM10 = 0.036 * g2kg       # g/km/veh PM10 (combustion + wear); urban roads, normal flow, 2017; light vehicles
    f_city_M_PM10 = 0.175 * g2kg       # g/km/veh PM10 (combustion + wear); urban roads, normal flow, 2017; medium-heavy vehicles
    f_city_H_PM10 = 0.183 * g2kg       # g/km/veh PM10 (combustion + wear); urban roads, normal flow, 2017; heavy vehicles
    f_ring_L_NOx = 0.27 * g2mole_NOx   # g/km/veh NOx as NO2 equivalents; motorway, free flow, 2017; light vehicles
    f_ring_M_NOx = 2.42 * g2mole_NOx   # g/km/veh NOx as NO2 equivalents; motorway, free flow, 2017; medium-heavy vehicles
    f_ring_H_NOx = 2.41 * g2mole_NOx   # g/km/veh NOx as NO2 equivalents; motorway, free flow, 2017; heavy vehicles
    f_ring_L_PM10 = 0.024 * g2kg       # g/km/veh PM10 (combustion + wear); motorway, free flow, 2017; light vehicles
    f_ring_M_PM10 = 0.097 * g2kg       # g/km/veh PM10 (combustion + wear); motorway, free flow, 2017; medium-heavy vehicles
    f_ring_H_PM10 = 0.090 * g2kg       # g/km/veh PM10 (combustion + wear); motorway, free flow, 2017; heavy vehicles
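    # The gridded hourly-mean emissions below multiply the vehicle-kilometres
    # driven per day in each cell by the class-specific emission factors,
    # summed over vehicle classes and road types:
    #   E = (1/24) * (1/A_cell) * sum( I [veh-km/day] * f [per veh-km] )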
E_NOx_traf = day2hr/A * (I_city_L * f_city_L_NOx + I_city_M * f_city_M_NOx + I_city_H * f_city_H_NOx + \
I_ring_L * f_ring_L_NOx + I_ring_M * f_ring_M_NOx + I_ring_H * f_ring_H_NOx) # veh-km/day * days2hr * g/km/veh * g2mole / km2 = mole/km2/hr
E_PM10_traf = day2hr/A * (I_city_L * f_city_L_PM10 + I_city_M * f_city_M_PM10 + I_city_H * f_city_H_PM10 + \
I_ring_L * f_ring_L_PM10 + I_ring_M * f_ring_M_PM10 + I_ring_H * f_ring_H_PM10) # veh-km/day * days2hr * g/km/veh * g2kg / km2 = kg/km2/hr
if not 'wrfchemi_written' in progress:
ncinfilenames = [os.path.join(wrfchemipath,filename) for filename in os.listdir(wrfchemipath) if (filename.startswith('NEI_wrfchemi') and 'd04' in filename)]
for ncinfilename in ncinfilenames:
zTime = ncinfilename[-15:-12]
domain = 'd04'
moy = ncinfilename[ -7: -4]
dow = ncinfilename[ -3: ]
zTimeOffset = 12 if zTime == '12z' else 0
ncoutfilename = '%s/NEI_Traffic_wrfchemi_%s_%s_%s_%s'%(wrfchemipath,zTime,domain,moy,dow)
ncinfile = nc.Dataset(ncinfilename,'r')
Times = ncinfile.variables['Times' ][:]
xlat = ncinfile.variables['XLAT' ][:]
xlon = ncinfile.variables['XLONG' ][:]
E_NOx_stat = ncinfile.variables['E_NOx_stat' ][:]
E_PM10_stat = ncinfile.variables['E_PM10_stat'][:]
ncoutfile = nc.Dataset(ncoutfilename,'w')
nz = 1
nlat,nlon = xlat.shape
ncoutfile.createDimension('Time', None) # will be 12
ncoutfile.createDimension('bottom_top' , nz)
ncoutfile.createDimension('south_north', nlat)
ncoutfile.createDimension('west_east' , nlon)
ncoutfile.createDimension('DateStrLen' , 19)
ncTimes = ncoutfile.createVariable('Times' ,'c',dimensions=('Time','DateStrLen'))
ncXLAT = ncoutfile.createVariable('XLAT' ,'d',dimensions=('south_north','west_east'))
ncXLONG = ncoutfile.createVariable('XLONG' ,'d',dimensions=('south_north','west_east'))
ncE_NOx_stat = ncoutfile.createVariable('E_NOx_stat' ,'d',dimensions=('Time','bottom_top','south_north','west_east'),fill_value = -1.e34)
ncE_NOx_traf = ncoutfile.createVariable('E_NOx_traf' ,'d',dimensions=('Time','bottom_top','south_north','west_east'),fill_value = -1.e34)
ncE_PM10_stat = ncoutfile.createVariable('E_PM10_stat' ,'d',dimensions=('Time','bottom_top','south_north','west_east'),fill_value = -1.e34)
ncE_PM10_traf = ncoutfile.createVariable('E_PM10_traf' ,'d',dimensions=('Time','bottom_top','south_north','west_east'),fill_value = -1.e34)
ncTimes[:] = Times
ncXLAT.FieldType = 104.
ncXLAT.MemoryOrder = 'XY'
ncXLAT.description = 'LATITUDE, SOUTH IS NEGATIVE'
ncXLAT.units = 'degree_north'
ncXLAT.stagger = ' '
ncXLAT.coordinates = 'XLONG XLAT'
ncXLAT[:] = xlat
ncXLONG.FieldType = 104.
ncXLONG.MemoryOrder = 'XY'
            ncXLONG.description = 'LONGITUDE, WEST IS NEGATIVE'
            ncXLONG.units = 'degree_east'
ncXLONG.stagger = ' '
ncXLONG.coordinates = 'XLONG XLAT'
ncXLONG[:] = xlon
ncE_NOx_stat.FieldType = 104.
ncE_NOx_stat.MemoryOrder = 'XYZ'
ncE_NOx_stat.description = 'NOx emissions from stationary sources'
ncE_NOx_stat.units = 'mole/km2/hr'
ncE_NOx_stat.stagger = ' '
ncE_NOx_stat.coordinates = 'XLONG XLAT'
ncE_NOx_stat[:] = E_NOx_stat
ncE_NOx_traf.FieldType = 104.
ncE_NOx_traf.MemoryOrder = 'XYZ'
ncE_NOx_traf.description = 'NOx emissions from road traffic'
ncE_NOx_traf.units = 'mole/km2/hr'
ncE_NOx_traf.stagger = ' '
ncE_NOx_traf.coordinates = 'XLONG XLAT'
Isnap = 13
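            # Isnap = 13 selects the temporal-profile column used for road
            # traffic in the tables above; the daily-mean emission is scaled
            # by the hour-of-day (offset 12 h for '12z' files), day-of-week
            # and month-of-year factors.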
for it in range(12):
TP = TP_hod['%02d'%(it+zTimeOffset)][Isnap] * TP_dow[dow][Isnap] * TP_moy[moy][Isnap]
ncE_NOx_traf[it,0,:,:] = TP*E_NOx_traf
# TP2 = np.tile(TP[...,np.newaxis,np.newaxis],(1,nlat,nlon))
ncE_PM10_stat.FieldType = 104.
ncE_PM10_stat.MemoryOrder = 'XYZ'
ncE_PM10_stat.description = 'PM10 emissions from stationary sources'
ncE_PM10_stat.units = 'kg/km2/hr'
ncE_PM10_stat.stagger = ' '
ncE_PM10_stat.coordinates = 'XLONG XLAT'
ncE_PM10_stat[:] = E_PM10_stat
ncE_PM10_traf.FieldType = 104.
ncE_PM10_traf.MemoryOrder = 'XYZ'
ncE_PM10_traf.description = 'PM10 emissions from road traffic'
ncE_PM10_traf.units = 'kg/km2/hr'
ncE_PM10_traf.stagger = ' '
ncE_PM10_traf.coordinates = 'XLONG XLAT'
Isnap = 13
for it in range(12):
TP = TP_hod['%02d'%(it+zTimeOffset)][Isnap] * TP_dow[dow][Isnap] * TP_moy[moy][Isnap]
# TP2 = np.tile(TP[...,np.newaxis,np.newaxis],(1,nlat,nlon))
ncE_PM10_traf[it,0,:,:] = TP*E_PM10_traf
ncoutfile.CEN_LAT = ncinfile.getncattr('CEN_LAT')
ncoutfile.CEN_LON = ncinfile.getncattr('CEN_LON')
ncoutfile.TRUELAT1 = ncinfile.getncattr('TRUELAT1')
ncoutfile.TRUELAT2 = ncinfile.getncattr('TRUELAT2')
ncoutfile.MOAD_CEN_LAT = ncinfile.getncattr('MOAD_CEN_LAT')
ncoutfile.STAND_LON = ncinfile.getncattr('STAND_LON')
ncoutfile.POLE_LAT = ncinfile.getncattr('POLE_LAT')
ncoutfile.POLE_LON = ncinfile.getncattr('POLE_LON')
ncoutfile.GMT = ncinfile.getncattr('GMT')
ncoutfile.JULYR = ncinfile.getncattr('JULYR')
ncoutfile.JULDAY = ncinfile.getncattr('JULDAY')
ncoutfile.MAP_PROJ = ncinfile.getncattr('MAP_PROJ')
ncoutfile.MMINLU = ncinfile.getncattr('MMINLU')
ncoutfile.NUM_LAND_CAT = ncinfile.getncattr('NUM_LAND_CAT')
ncoutfile.ISWATER = ncinfile.getncattr('ISWATER')
ncoutfile.ISLAKE = ncinfile.getncattr('ISLAKE')
ncoutfile.ISICE = ncinfile.getncattr('ISICE')
ncoutfile.ISURBAN = ncinfile.getncattr('ISURBAN')
ncoutfile.ISOILWATER = ncinfile.getncattr('ISOILWATER')
ncinfile.close()
ncoutfile.close()
progress.append('wrfchemi_written')
if False:
f = plt.figure(1)
f.clf()
vmax = 2000.
ax = f.add_subplot(231)
ax.pcolor(xlon,xlat,I_city_L,vmax=vmax)
ax.set_title('I city light')
ax = f.add_subplot(232)
ax.pcolor(xlon,xlat,I_city_M,vmax=vmax)
ax.set_title('I city middle')
ax = f.add_subplot(233)
ax.pcolor(xlon,xlat,I_city_H,vmax=vmax)
ax.set_title('I city heavy')
ax = f.add_subplot(234)
ax.pcolor(xlon,xlat,I_ring_L,vmax=vmax)
ax.set_title('I ring light')
ax = f.add_subplot(235)
ax.pcolor(xlon,xlat,I_ring_M,vmax=vmax)
ax.set_title('I ring middle')
ax = f.add_subplot(236)
ax.pcolor(xlon,xlat,I_ring_H,vmax=vmax)
ax.set_title('I ring heavy')
f = plt.figure(2)
f.clf()
ax = f.add_subplot(121)
        ax.pcolor(xlon,xlat,E_NOx_traf)
        ax.set_title('E_NOx_traf')
        ax = f.add_subplot(122)
        ax.pcolor(xlon,xlat,E_PM10_traf)
        ax.set_title('E_PM10_traf')
f = plt.figure(3)
f.clf()
ax = f.add_subplot(111)
ax.pcolor(xlon,xlat,L_city)
if __name__=="__main__":
    month = 3  # 1-based month index; 3 -> 'Mar'
    day = 0    # day-of-week index into ('Mon', ..., 'Sun'); Monday assumed as default
if platform == 'win32':
trafficpath = 'u:\AMS_Stimulus_2016\Data_share\Verkeersintensiteiten per grid ring en binnenstad'
wrfchemipath = 'u:\AMS_Stimulus_2016\Data_share\Emissions\wrfchemi'
elif platform == 'linux2':
trafficpath = '/projects/0/aams/wrfv3/Data_share/Verkeersintensiteiten per grid ring en binnenstad'
wrfchemipath = '/projects/0/aams/wrfv3/Data_share/Emissions/wrfchemi'
    traffic2wrfchemi(trafficpath, wrfchemipath, month, day)
| apache-2.0 |
wazeerzulfikar/scikit-learn | benchmarks/bench_plot_neighbors.py | 101 | 6469 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, DD, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
plt.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = plt.subplot(sbplt, yscale='log')
plt.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = plt.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = plt.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
plt.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
plt.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
plt.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
plt.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
plt.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
plt.show()
| bsd-3-clause |
ephes/scikit-learn | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
jmschrei/scikit-learn | examples/gaussian_process/plot_gpr_noisy_targets.py | 45 | 3680 | """
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``alpha`` is applied as a Tikhonov
regularization of the assumed covariance between the training points.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
#         Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from matplotlib import pyplot as pl
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
# ----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
# ----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Instantiate a Gaussian Process model
gp = GaussianProcessRegressor(kernel=kernel, alpha=(dy / y) ** 2,
n_restarts_optimizer=10)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
Carralex/landlab | landlab/components/chi_index/channel_chi.py | 5 | 26351 | # -*- coding: utf-8 -*-
"""
Created March 2016.
@author: dejh
"""
from __future__ import print_function
from six.moves import range # this is Python 3's generator, not P2's list
from landlab import ModelParameterDictionary, Component, FieldError, \
FIXED_VALUE_BOUNDARY, BAD_INDEX_VALUE, CLOSED_BOUNDARY
import numpy as np
try:
from itertools import izip
except ImportError:
izip = zip
class ChiFinder(Component):
"""
This component calculates chi indices, sensu Perron & Royden, 2013,
for a Landlab landscape.
Construction::
ChiFinder(grid, reference_concavity=0.5, min_drainage_area=1.e6,
reference_area=1., use_true_dx=False)
Parameters
----------
grid : RasterModelGrid
A landlab RasterModelGrid.
reference_concavity : float
The reference concavity to use in the calculation.
min_drainage_area : float (m**2)
The drainage area down to which to calculate chi.
reference_area : float or None (m**2)
If None, will default to the mean core cell area on the grid.
Else, provide a value to use. Essentially becomes a prefactor on the
value of chi.
use_true_dx : bool (default False)
If True, integration to give chi is performed using each value of node
spacing along the channel (which can lead to a quantization effect,
        and is not preferred by Perron & Royden). If False, the mean value of
        node spacing along all the channels is assumed everywhere.
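
    Notes
    -----
    Following Perron & Royden (2013), chi at a node is the along-channel
    integral, taken upstream from the outlet, of
    (reference_area / drainage_area) ** reference_concavity, i.e.
    chi(x) = integral from x_b to x of (A0 / A(x'))**theta dx'.
    With use_true_dx=False the integral is approximated using the mean
    channel node spacing; with use_true_dx=True the individual link lengths
    are used with a trapezium rule.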
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> from landlab.components import FlowRouter, FastscapeEroder
>>> mg = RasterModelGrid((3, 4), 1.)
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = CLOSED_BOUNDARY
>>> _ = mg.add_field('node', 'topographic__elevation', mg.node_x)
>>> fr = FlowRouter(mg)
>>> cf = ChiFinder(mg, min_drainage_area=1., reference_concavity=1.)
>>> fr.run_one_step()
>>> cf.calculate_chi()
>>> mg.at_node['channel__chi_index'].reshape(mg.shape)[1, :]
array([ 0.5, 1. , 2. , 0. ])
>>> mg2 = RasterModelGrid((5, 5), 100.)
>>> for nodes in (mg2.nodes_at_right_edge, mg2.nodes_at_bottom_edge,
... mg2.nodes_at_top_edge):
... mg2.status_at_node[nodes] = CLOSED_BOUNDARY
>>> _ = mg2.add_zeros('node', 'topographic__elevation')
>>> mg2.at_node['topographic__elevation'][mg2.core_nodes] = mg2.node_x[
... mg2.core_nodes]/1000.
>>> np.random.seed(0)
>>> mg2.at_node['topographic__elevation'][
... mg2.core_nodes] += np.random.rand(mg2.number_of_core_nodes)
>>> fr2 = FlowRouter(mg2)
>>> sp2 = FastscapeEroder(mg2, K_sp=0.01)
>>> cf2 = ChiFinder(mg2, min_drainage_area=0., reference_concavity=0.5)
>>> for i in range(10):
... mg2.at_node['topographic__elevation'][mg2.core_nodes] += 10.
... fr2.run_one_step()
... sp2.run_one_step(1000.)
>>> fr2.run_one_step()
>>> cf2.calculate_chi()
>>> mg2.at_node['channel__chi_index'].reshape(
... mg2.shape) # doctest: +NORMALIZE_WHITESPACE
array([[ 0. , 0. , 0. , 0. , 0. ],
[ 0.77219416, 1.54438833, 2.63643578, 2.61419437, 0. ],
[ 1.09204746, 2.18409492, 1.52214691, 2.61419437, 0. ],
[ 0.44582651, 0.89165302, 1.66384718, 2.75589464, 0. ],
[ 0. , 0. , 0. , 0. , 0. ]])
>>> cf2.calculate_chi(min_drainage_area=20000., use_true_dx=True,
... reference_area=mg2.at_node['drainage_area'].max())
>>> cf2.chi_indices.reshape(mg2.shape) # doctest: +NORMALIZE_WHITESPACE
array([[ 0. , 0. , 0. , 0. , 0. ],
[ 0. , 173.20508076, 0. , 0. , 0. ],
[ 0. , 0. , 270.71067812, 0. , 0. ],
[ 0. , 100. , 236.60254038, 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ]])
>>> cf2.hillslope_mask.reshape(mg2.shape)
array([[ True, True, True, True, True],
[False, False, True, True, True],
[ True, True, False, True, True],
[False, False, False, True, True],
[ True, True, True, True, True]], dtype=bool)
"""
_name = 'ChiFinder'
_input_var_names = (
'topographic__elevation',
'drainage_area',
'topographic__steepest_slope',
'flow__receiver_node',
'flow__upstream_node_order',
'flow__link_to_receiver_node',
)
_output_var_names = (
'channel__chi_index',
)
_var_units = {'topographic__elevation': 'm',
'drainage_area': 'm**2',
'topographic__steepest_slope': '-',
'flow__receiver_node': '-',
'flow__upstream_node_order': '-',
'flow__link_to_receiver_node': '-',
'channel__chi_index': 'variable',
}
_var_mapping = {'topographic__elevation': 'node',
'drainage_area': 'node',
'topographic__steepest_slope': 'node',
'flow__receiver_node': 'node',
'flow__upstream_node_order': 'node',
'flow__link_to_receiver_node': 'node',
'channel__chi_index': 'node',
}
_var_doc = {'topographic__elevation': 'Surface topographic elevation',
'drainage_area': 'upstream drainage area',
'topographic__steepest_slope': ('the steepest downslope ' +
'rise/run leaving the node'),
'flow__receiver_node': ('the downstream node at the end of the ' +
'steepest link'),
'flow__upstream_node_order': ('node order such that nodes must ' +
'appear in the list after all nodes ' +
'downstream of them'),
'flow__link_to_receiver_node':
('ID of link downstream of each node, which carries the ' +
'discharge'),
                'channel__chi_index': 'the local chi index',
}
def __init__(self, grid, reference_concavity=0.5, min_drainage_area=1.e6,
reference_area=1., use_true_dx=False, **kwds):
"""
Constructor for the component.
"""
self._grid = grid
self._reftheta = reference_concavity
self.min_drainage = min_drainage_area
if reference_area is None:
try:
self._A0 = float(self.grid.cell_area_at_node)
except TypeError: # was an array
self._A0 = self.grid.cell_area_at_node[
self.grid.core_nodes].mean()
else:
assert reference_area > 0.
self._A0 = reference_area
self.use_true_dx = use_true_dx
self.chi = self._grid.add_zeros('node', 'channel__chi_index')
self._mask = self.grid.ones('node', dtype=bool)
# this one needs modifying if smooth_elev
self._elev = self.grid.at_node['topographic__elevation']
def calculate_chi(self, **kwds):
"""
This is the main method. Call it to calculate local chi indices
at all points with drainage areas greater than *min_drainage_area*.
This "run" method can optionally take the same parameter set as
provided at instantiation. If they are provided, they will override
the existing values from instantiation.
Chi of any node without a defined value is reported as 0. These nodes
are also identified in the mask retrieved with :func:`hillslope_mask`.
"""
self._mask.fill(True)
self.chi.fill(0.)
# test for new kwds:
reftheta = kwds.get('reference_concavity', self._reftheta)
min_drainage = kwds.get('min_drainage_area', self.min_drainage)
A0 = kwds.get('reference_area', self._A0)
if A0 is None:
try:
A0 = float(self.grid.cell_area_at_node)
except TypeError:
A0 = self.grid.cell_area_at_node[self.grid.core_nodes].mean()
assert A0 > 0.
use_true_dx = kwds.get('use_true_dx', self.use_true_dx)
upstr_order = self.grid.at_node['flow__upstream_node_order']
# get an array of only nodes with A above threshold:
valid_upstr_order = upstr_order[self.grid.at_node['drainage_area'][
upstr_order] >= min_drainage]
valid_upstr_areas = self.grid.at_node['drainage_area'][
valid_upstr_order]
if not use_true_dx:
chi_integrand = (A0/valid_upstr_areas)**reftheta
mean_dx = self.mean_channel_node_spacing(valid_upstr_order)
self.integrate_chi_avg_dx(valid_upstr_order, chi_integrand,
self.chi, mean_dx)
else:
chi_integrand = self.grid.zeros('node')
chi_integrand[valid_upstr_order] = (A0/valid_upstr_areas)**reftheta
self.integrate_chi_each_dx(valid_upstr_order, chi_integrand,
self.chi)
# stamp over the closed nodes, as it's possible they can receive infs
# if min_drainage_area < grid.cell_area_at_node
self.chi[self.grid.status_at_node == CLOSED_BOUNDARY] = 0.
self._mask[valid_upstr_order] = False
def integrate_chi_avg_dx(self, valid_upstr_order, chi_integrand,
chi_array, mean_dx):
"""
Calculates chi at each channel node by summing chi_integrand.
This method assumes a uniform, mean spacing between nodes. Method is
deliberately split out for potential cythonization at a later stage.
Parameters
----------
valid_upstr_order : array of ints
nodes in the channel network in upstream order.
chi_integrand : array of floats
The value (A0/A)**concavity, in upstream order.
chi_array : array of floats
Array in which to store chi.
mean_dx : float
The mean node spacing in the network.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> from landlab.components import FlowRouter
>>> mg = RasterModelGrid((5, 4), 1.)
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = CLOSED_BOUNDARY
>>> z = mg.node_x.copy()
>>> z[[5, 13]] = z[6] # guard nodes
>>> _ = mg.add_field('node', 'topographic__elevation', z)
>>> fr = FlowRouter(mg)
>>> cf = ChiFinder(mg)
>>> fr.run_one_step()
>>> ch_nodes = np.array([4, 8, 12, 5, 9, 13, 6, 10, 14])
>>> ch_integrand = 3.*np.ones(9, dtype=float) # to make calc clearer
>>> chi_array = np.zeros(mg.number_of_nodes, dtype=float)
>>> cf.integrate_chi_avg_dx(ch_nodes, ch_integrand, chi_array, 0.5)
>>> chi_array.reshape(mg.shape)
array([[ 0. , 0. , 0. , 0. ],
[ 1.5, 3. , 4.5, 0. ],
[ 1.5, 3. , 4.5, 0. ],
[ 1.5, 3. , 4.5, 0. ],
[ 0. , 0. , 0. , 0. ]])
"""
receivers = self.grid.at_node['flow__receiver_node']
# because chi_array is all zeros, BC cases where node is receiver
# resolve themselves
for (node, integrand) in izip(valid_upstr_order, chi_integrand):
dstr_node = receivers[node]
chi_array[node] = chi_array[dstr_node] + integrand
chi_array *= mean_dx
def integrate_chi_each_dx(self, valid_upstr_order, chi_integrand_at_nodes,
chi_array):
"""
Calculates chi at each channel node by summing chi_integrand*dx.
This method accounts explicitly for spacing between each node. Method
is deliberately split out for potential cythonization at a later
stage. Uses a trapezium integration method.
Parameters
----------
valid_upstr_order : array of ints
nodes in the channel network in upstream order.
chi_integrand_at_nodes : array of floats
The value (A0/A)**concavity, in *node* order.
chi_array : array of floats
Array in which to store chi.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> from landlab.components import FlowRouter
>>> mg = RasterModelGrid((5, 4), 3.)
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = CLOSED_BOUNDARY
>>> z = mg.node_x.copy()
>>> z[[5, 13]] = z[6] # guard nodes
>>> _ = mg.add_field('node', 'topographic__elevation', z)
>>> fr = FlowRouter(mg)
>>> cf = ChiFinder(mg)
>>> fr.run_one_step()
>>> ch_nodes = np.array([4, 8, 12, 5, 9, 13, 6, 10, 14])
>>> ch_integrand = 2.*np.ones(mg.number_of_nodes,
... dtype=float) # to make calc clearer
>>> chi_array = np.zeros(mg.number_of_nodes, dtype=float)
>>> cf.integrate_chi_each_dx(ch_nodes, ch_integrand, chi_array)
>>> chi_array.reshape(mg.shape)
array([[ 0. , 0. , 0. , 0. ],
[ 0. , 6. , 14.48528137, 0. ],
[ 0. , 6. , 12. , 0. ],
[ 0. , 6. , 14.48528137, 0. ],
[ 0. , 0. , 0. , 0. ]])
>>> from landlab.components import FastscapeEroder
>>> mg2 = RasterModelGrid((5, 5), 100.)
>>> for nodes in (mg2.nodes_at_right_edge, mg2.nodes_at_bottom_edge,
... mg2.nodes_at_top_edge):
... mg2.status_at_node[nodes] = CLOSED_BOUNDARY
>>> _ = mg2.add_zeros('node', 'topographic__elevation')
>>> mg2.at_node['topographic__elevation'][mg2.core_nodes] = mg2.node_x[
... mg2.core_nodes]/1000.
>>> np.random.seed(0)
>>> mg2.at_node['topographic__elevation'][
... mg2.core_nodes] += np.random.rand(mg2.number_of_core_nodes)
>>> fr2 = FlowRouter(mg2)
>>> sp2 = FastscapeEroder(mg2, K_sp=0.01)
>>> cf2 = ChiFinder(mg2, min_drainage_area=1., reference_concavity=0.5,
... use_true_dx=True)
>>> for i in range(10):
... mg2.at_node['topographic__elevation'][mg2.core_nodes] += 10.
... fr2.run_one_step()
... sp2.run_one_step(1000.)
>>> fr2.run_one_step()
>>> output_array = np.zeros(25, dtype=float)
>>> cf2.integrate_chi_each_dx(mg2.at_node['flow__upstream_node_order'],
... np.ones(25, dtype=float),
... output_array)
>>> output_array.reshape(mg2.shape)
array([[ 0. , 0. , 0. , 0. , 0. ],
[ 0. , 100. , 200. , 382.84271247, 0. ],
[ 0. , 100. , 241.42135624, 341.42135624, 0. ],
[ 0. , 100. , 200. , 300. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ]])
"""
receivers = self.grid.at_node['flow__receiver_node']
links = self.grid.at_node['flow__link_to_receiver_node']
link_lengths = self.grid._length_of_link_with_diagonals
# because chi_array is all zeros, BC cases where node is receiver
# resolve themselves
half_integrand = 0.5 * chi_integrand_at_nodes
for node in valid_upstr_order:
dstr_node = receivers[node]
dstr_link = links[node]
if dstr_link != BAD_INDEX_VALUE:
dstr_length = link_lengths[dstr_link]
half_head_val = half_integrand[node]
half_tail_val = half_integrand[dstr_node]
mean_val = half_head_val + half_tail_val
chi_to_add = mean_val * dstr_length
chi_array[node] = chi_array[dstr_node] + chi_to_add
def mean_channel_node_spacing(self, ch_nodes):
"""
Calculates the mean spacing between all adjacent channel nodes.
Parameters
----------
ch_nodes : array of ints
The nodes within the defined channel network.
Returns
-------
mean_spacing : float (m)
The mean spacing between all nodes in the network.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> from landlab.components import FlowRouter
>>> mg = RasterModelGrid((5, 4), 2.)
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = CLOSED_BOUNDARY
>>> z = mg.node_x.copy()
>>> z[[5, 13]] = z[6] # guard nodes
>>> _ = mg.add_field('node', 'topographic__elevation', z)
>>> fr = FlowRouter(mg)
>>> cf = ChiFinder(mg)
>>> fr.run_one_step()
>>> ch_nodes = np.array([4, 8, 12, 5, 9, 13, 6, 10, 14])
>>> cf.mean_channel_node_spacing(ch_nodes)
2.2761423749153966
"""
ch_links = self.grid.at_node['flow__link_to_receiver_node'][ch_nodes]
ch_links_valid = ch_links[ch_links != BAD_INDEX_VALUE]
valid_link_lengths = self.grid._length_of_link_with_diagonals[
ch_links_valid]
return valid_link_lengths.mean()
@property
def chi_indices(self):
"""
        Return the array of channel chi index values.
Nodes not in the channel receive zeros.
"""
return self.chi
@property
def hillslope_mask(self):
"""
Return a boolean array, False where steepness indices exist.
"""
return self._mask
def best_fit_chi_elevation_gradient_and_intercept(self, ch_nodes=None):
"""
Returns least squares best fit for a straight line through a chi plot.
Parameters
----------
ch_nodes : array of ints or None
Nodes at which to consider chi and elevation values. If None,
will use all nodes in grid with area greater than the component
min_drainage_area.
Returns
-------
coeffs : array(gradient, intercept)
            A length-2 array containing the gradient m, then the intercept z0,
            where z = z0 + m * chi.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> from landlab.components import FlowRouter
>>> mg = RasterModelGrid((3, 4), 1.)
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = CLOSED_BOUNDARY
>>> z = mg.add_field('node', 'topographic__elevation',
... mg.node_x.copy())
>>> z[4:8] = np.array([0.5, 1., 2., 0.])
>>> fr = FlowRouter(mg)
>>> cf = ChiFinder(mg, min_drainage_area=1., reference_concavity=1.)
>>> fr.run_one_step()
>>> cf.calculate_chi()
>>> mg.at_node['channel__chi_index'].reshape(mg.shape)[1, :]
array([ 0.5, 1. , 2. , 0. ])
>>> coeffs = cf.best_fit_chi_elevation_gradient_and_intercept()
>>> np.allclose(np.array([1., 0.]), coeffs)
True
"""
if ch_nodes is None:
good_vals = np.logical_not(self.hillslope_mask)
else:
good_vals = np.array(ch_nodes)
chi_vals = self.chi_indices[good_vals]
elev_vals = self.grid.at_node['topographic__elevation'][good_vals]
coeffs = np.polyfit(chi_vals, elev_vals, 1)
return coeffs
def nodes_downstream_of_channel_head(self, channel_head):
"""
Find and return an array with nodes downstream of channel_head.
Parameters
----------
channel_head : int
Node ID of channel head from which to get downstream nodes.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> from landlab.components import FlowRouter
>>> mg = RasterModelGrid((3, 4), 1.)
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = CLOSED_BOUNDARY
>>> z = mg.add_field('node', 'topographic__elevation',
... mg.node_x.copy())
>>> z[4:8] = np.array([0.5, 1., 2., 0.])
>>> fr = FlowRouter(mg)
>>> fr.run_one_step()
>>> mg.at_node['flow__receiver_node']
array([ 0, 1, 2, 3, 4, 4, 5, 7, 8, 9, 10, 11])
>>> cf = ChiFinder(mg, min_drainage_area=0., reference_concavity=1.)
>>> cf.calculate_chi()
>>> cf.nodes_downstream_of_channel_head(6)
[6, 5, 4]
"""
ch_nodes = []
current_node = channel_head
while True:
ch_A = self.grid.at_node['drainage_area'][current_node]
if ch_A > self.min_drainage:
ch_nodes.append(current_node)
next_node = self.grid.at_node['flow__receiver_node'][
current_node]
if next_node == current_node:
break
else:
current_node = next_node
return ch_nodes
def create_chi_plot(self, channel_heads=None, label_axes=True,
symbol='kx', plot_line=False, line_symbol='r-'):
"""
Plots a "chi plot" (chi vs elevation for points in channel network).
If channel_heads is provided, only the channel nodes downstream of
the provided points (and with area > min_drainage_area) will be
plotted.
Parameters
----------
channel_heads : int, list or array of ints, or None
            Node IDs of channel heads from which to plot downstream.
label_axes : bool
If True, labels the axes as "Chi" and "Elevation (m)".
symbol : str
A matplotlib-style string for the style to use for the points.
plot_line : bool
If True, will plot a linear best fit line through the data cloud.
line_symbol : str
A matplotlib-style string for the style to use for the line, if
plot_line.
"""
from matplotlib.pyplot import plot, xlabel, ylabel, figure, clf, show
figure('Chi plot')
clf()
if channel_heads is not None:
if plot_line:
good_nodes = set()
if type(channel_heads) is int:
channel_heads = [channel_heads, ]
for head in channel_heads:
ch_nodes = self.nodes_downstream_of_channel_head(head)
plot(self.chi_indices[ch_nodes],
self.grid.at_node['topographic__elevation'][ch_nodes],
symbol)
if plot_line:
good_nodes.update(ch_nodes)
else:
ch_nodes = np.logical_not(self.hillslope_mask)
plot(self.chi_indices[ch_nodes],
self.grid.at_node['topographic__elevation'][ch_nodes],
symbol)
good_nodes = ch_nodes
if plot_line:
coeffs = self.best_fit_chi_elevation_gradient_and_intercept(
good_nodes)
p = np.poly1d(coeffs)
chirange = np.linspace(self.chi_indices[good_nodes].min(),
self.chi_indices[good_nodes].max(), 100)
plot(chirange, p(chirange), line_symbol)
if label_axes:
ylabel('Elevation (m)')
xlabel('Chi')
@property
def masked_chi_indices(self):
"""
Returns a masked array version of the 'channel__chi_index' field.
This enables easier plotting of the values with
:func:`landlab.imshow_grid_at_node` or similar.
Examples
--------
Make a topographic map with an overlay of chi values:
>>> from landlab import imshow_grid_at_node
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> from landlab.components import FlowRouter, FastscapeEroder
>>> mg = RasterModelGrid((5, 5), 100.)
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = CLOSED_BOUNDARY
>>> _ = mg.add_zeros('node', 'topographic__elevation')
>>> mg.at_node['topographic__elevation'][mg.core_nodes] = mg.node_x[
... mg.core_nodes]/1000.
>>> np.random.seed(0)
>>> mg.at_node['topographic__elevation'][
... mg.core_nodes] += np.random.rand(mg.number_of_core_nodes)
>>> fr = FlowRouter(mg)
>>> sp = FastscapeEroder(mg, K_sp=0.01)
>>> cf = ChiFinder(mg, min_drainage_area=20000.)
>>> for i in range(10):
... mg.at_node['topographic__elevation'][mg.core_nodes] += 10.
... fr.run_one_step()
... sp.run_one_step(1000.)
>>> fr.run_one_step()
>>> cf.calculate_chi()
>>> imshow_grid_at_node(mg, 'topographic__elevation',
... allow_colorbar=False)
>>> imshow_grid_at_node(mg, cf.masked_chi_indices,
... color_for_closed=None, cmap='winter')
"""
return np.ma.array(self.chi_indices, mask=self.hillslope_mask)
| mit |
BoltzmannBrain/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/cbook.py | 69 | 42525 | """
A collection of utility functions and classes. Many (but not all)
from the Python Cookbook -- hence the name cbook
"""
from __future__ import generators
import re, os, errno, sys, StringIO, traceback, locale, threading, types
import time, datetime
import warnings
import numpy as np
import numpy.ma as ma
from weakref import ref
major, minor1, minor2, s, tmp = sys.version_info
# on some systems, locale.getpreferredencoding returns None, which can break unicode
preferredencoding = locale.getpreferredencoding()
def unicode_safe(s):
if preferredencoding is None: return unicode(s)
else: return unicode(s, preferredencoding)
class converter:
"""
Base class for handling string -> python type with support for
missing values
"""
def __init__(self, missing='Null', missingval=None):
self.missing = missing
self.missingval = missingval
def __call__(self, s):
if s==self.missing: return self.missingval
return s
def is_missing(self, s):
return not s.strip() or s==self.missing
class tostr(converter):
'convert to string or None'
def __init__(self, missing='Null', missingval=''):
converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
'convert to a datetime or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s): return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.datetime(*tup[:6])
class todate(converter):
'convert to a date or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s): return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.date(*tup[:3])
class tofloat(converter):
'convert to a float or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
self.missingval = missingval
def __call__(self, s):
if self.is_missing(s): return self.missingval
return float(s)
class toint(converter):
'convert to an int or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
def __call__(self, s):
if self.is_missing(s): return self.missingval
return int(s)
class CallbackRegistry:
"""
Handle registering and disconnecting for a set of signals and
callbacks::
signals = 'eat', 'drink', 'be merry'
def oneat(x):
print 'eat', x
def ondrink(x):
print 'drink', x
callbacks = CallbackRegistry(signals)
ideat = callbacks.connect('eat', oneat)
iddrink = callbacks.connect('drink', ondrink)
#tmp = callbacks.connect('drunk', ondrink) # this will raise a ValueError
        callbacks.process('drink', 123)    # will call ondrink
        callbacks.process('eat', 456)      # will call oneat
callbacks.process('be merry', 456) # nothing will be called
callbacks.disconnect(ideat) # disconnect oneat
callbacks.process('eat', 456) # nothing will be called
"""
def __init__(self, signals):
'*signals* is a sequence of valid signals'
self.signals = set(signals)
# callbacks is a dict mapping the signal to a dictionary
# mapping callback id to the callback function
self.callbacks = dict([(s, dict()) for s in signals])
self._cid = 0
def _check_signal(self, s):
'make sure *s* is a valid signal or raise a ValueError'
if s not in self.signals:
signals = list(self.signals)
signals.sort()
raise ValueError('Unknown signal "%s"; valid signals are %s'%(s, signals))
def connect(self, s, func):
"""
        register *func* to be called when signal *s* is generated;
        returns a connection id that can be used with :meth:`disconnect`
"""
self._check_signal(s)
self._cid +=1
self.callbacks[s][self._cid] = func
return self._cid
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in self.callbacks.items():
try: del callbackd[cid]
except KeyError: continue
else: return
def process(self, s, *args, **kwargs):
"""
process signal *s*. All of the functions registered to receive
callbacks on *s* will be called with *\*args* and *\*\*kwargs*
"""
self._check_signal(s)
for func in self.callbacks[s].values():
func(*args, **kwargs)
class Scheduler(threading.Thread):
"""
Base class for timeout and idle scheduling
"""
idlelock = threading.Lock()
id = 0
def __init__(self):
threading.Thread.__init__(self)
self.id = Scheduler.id
self._stopped = False
Scheduler.id += 1
self._stopevent = threading.Event()
def stop(self):
if self._stopped: return
self._stopevent.set()
self.join()
self._stopped = True
class Timeout(Scheduler):
"""
Schedule recurring events with a wait time in seconds
"""
def __init__(self, wait, func):
Scheduler.__init__(self)
self.wait = wait
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(self.wait)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b: break
class Idle(Scheduler):
"""
Schedule callbacks when scheduler is idle
"""
# the prototype impl is a bit of a poor man's idle handler. It
# just implements a short wait time. But it will provide a
    # placeholder for a proper impl later
waittime = 0.05
def __init__(self, func):
Scheduler.__init__(self)
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(Idle.waittime)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b: break
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
    homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None: self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def __str__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def strip_math(s):
'remove latex formatting from mathtext'
remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
s = s[1:-1]
for r in remove: s = s.replace(r,'')
return s
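# Illustrative sketch, not part of the original module: strip_math removes the
# surrounding '$' delimiters and the listed TeX commands and braces, leaving
# plain text. The label below is an arbitrary example.
def _strip_math_example():
    # returns 'velocity' -- '$', '\rm' and the braces are all stripped
    return strip_math(r'$\rm{velocity}$')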
class Bunch:
"""
Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables:
>>> point = Bunch(datum=2, squared=4, coord=12)
    >>> point.datum
    2
By: Alex Martelli
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def unique(x):
'Return a list of unique elements of *x*'
return dict([ (val, 1) for val in x]).keys()
def iterable(obj):
'return true if *obj* is iterable'
try: len(obj)
except: return False
return True
def is_string_like(obj):
'Return True if *obj* looks like a string'
if isinstance(obj, (str, unicode)): return True
# numpy strings are subclass of str, ma strings are not
if ma.isMaskedArray(obj):
if obj.ndim == 0 and obj.dtype.kind in 'SU':
return True
else:
return False
try: obj + ''
except (TypeError, ValueError): return False
return True
def is_sequence_of_strings(obj):
"""
Returns true if *obj* is iterable and contains strings
"""
if not iterable(obj): return False
if is_string_like(obj): return False
for o in obj:
if not is_string_like(o): return False
return True
def is_writable_file_like(obj):
'return true if *obj* looks like a file object with a *write* method'
return hasattr(obj, 'write') and callable(obj.write)
def is_scalar(obj):
'return true if *obj* is not string like and is not iterable'
return not is_string_like(obj) and not iterable(obj)
def is_numlike(obj):
'return true if *obj* looks like a number'
try: obj+1
except TypeError: return False
else: return True
def to_filehandle(fname, flag='r', return_opened=False):
"""
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in .gz. *flag* is a
read/write flag for :func:`file`
"""
if is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, flag)
else:
fh = file(fname, flag)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a string or file handle')
if return_opened:
return fh, opened
return fh
def is_scalar_or_string(val):
return is_string_like(val) or not iterable(val)
def flatten(seq, scalarp=is_scalar_or_string):
"""
this generator flattens nested containers such as
>>> l=( ('John', 'Hunter'), (1,23), [[[[42,(5,23)]]]])
so that
>>> for i in flatten(l): print i,
John Hunter 1 23 42 5 23
By: Composite of Holger Krekel and Luther Blissett
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item): yield item
else:
for subitem in flatten(item, scalarp):
yield subitem
class Sorter:
"""
Sort by attribute or item
Example usage::
sort = Sorter()
list = [(1, 2), (4, 8), (0, 3)]
dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
{'a': 9, 'b': 9}]
sort(list) # default sort
sort(list, 1) # sort by index 1
sort(dict, 'a') # sort a list of dicts by key 'a'
"""
def _helper(self, data, aux, inplace):
aux.sort()
result = [data[i] for junk, i in aux]
if inplace: data[:] = result
return result
def byItem(self, data, itemindex=None, inplace=1):
if itemindex is None:
if inplace:
data.sort()
result = data
else:
result = data[:]
result.sort()
return result
else:
aux = [(data[i][itemindex], i) for i in range(len(data))]
return self._helper(data, aux, inplace)
def byAttribute(self, data, attributename, inplace=1):
aux = [(getattr(data[i],attributename),i) for i in range(len(data))]
return self._helper(data, aux, inplace)
# a couple of handy synonyms
sort = byItem
__call__ = byItem
class Xlator(dict):
"""
All-in-one multiple-string-substitution class
Example usage::
text = "Larry Wall is the creator of Perl"
adict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print multiple_replace(adict, text)
xlat = Xlator(adict)
print xlat.xlat(text)
"""
def _make_regex(self):
""" Build re object based on the keys of the current dictionary """
return re.compile("|".join(map(re.escape, self.keys())))
def __call__(self, match):
""" Handler invoked for each regex *match* """
return self[match.group(0)]
def xlat(self, text):
""" Translate *text*, returns the modified text. """
return self._make_regex().sub(self, text)
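# Illustrative sketch, not part of the original module: Xlator compiles one
# regex from the mapping keys and rewrites every occurrence in a single pass.
# The mapping below is hypothetical.
def _xlator_example():
    xlat = Xlator({'cat': 'dog', 'mat': 'rug'})
    # returns 'the dog sat on the rug'
    return xlat.xlat('the cat sat on the mat')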
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc: fc = c # Remember first letter
d = soundex_digits[ord(c)-ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
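# Illustrative sketch, not part of the original module: spellings that differ
# only in vowels collapse to the same Odell-Russell code.
def _soundex_example():
    # both return 'R163': R kept as first letter, vowels dropped,
    # b/p -> 1, r -> 6, t -> 3
    return soundex('Robert'), soundex('Rupert')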
class Null:
""" Null objects always and reliably "do nothing." """
def __init__(self, *args, **kwargs): pass
def __call__(self, *args, **kwargs): return self
def __str__(self): return "Null()"
def __repr__(self): return "Null()"
def __nonzero__(self): return 0
def __getattr__(self, name): return self
def __setattr__(self, name, value): return self
def __delattr__(self, name): return self
def mkdirs(newdir, mode=0777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
try:
if not os.path.exists(newdir):
parts = os.path.split(newdir)
for i in range(1, len(parts)+1):
thispart = os.path.join(*parts[:i])
if not os.path.exists(thispart):
os.makedirs(thispart, mode)
except OSError, err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(newdir):
raise
class GetRealpathAndStat:
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
'delete all of the *keys* from the :class:`dict` *d*'
for key in keys:
try: del d[key]
except KeyError: pass
class RingBuffer:
""" class that implements a not-yet-full buffer """
def __init__(self,size_max):
self.max = size_max
self.data = []
class __Full:
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.data[self.cur] = x
self.cur = (self.cur+1) % self.max
def get(self):
""" return list of elements in correct order """
return self.data[self.cur:]+self.data[:self.cur]
def append(self,x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
self.__class__ = __Full
def get(self):
""" Return a list of elements from the oldest to the newest. """
return self.data
    def __getitem__(self, i):
return self.data[i % len(self.data)]
def get_split_ind(seq, N):
"""
*seq* is a list of words. Return the index into seq such that::
        len(' '.join(seq[:ind])) <= N
"""
sLen = 0
# todo: use Alex's xrange pattern from the cbook for efficiency
for (word, ind) in zip(seq, range(len(seq))):
sLen += len(word) + 1 # +1 to account for the len(' ')
if sLen>=N: return ind
return len(seq)
def wrap(prefix, text, cols):
'wrap *text* with *prefix* at length *cols*'
pad = ' '*len(prefix.expandtabs())
available = cols - len(pad)
seq = text.split(' ')
Nseq = len(seq)
ind = 0
lines = []
while ind<Nseq:
lastInd = ind
ind += get_split_ind(seq[ind:], available)
lines.append(seq[lastInd:ind])
# add the prefix to the first line, pad with spaces otherwise
ret = prefix + ' '.join(lines[0]) + '\n'
for line in lines[1:]:
ret += pad + ' '.join(line) + '\n'
return ret
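# Illustrative sketch, not part of the original module: wrap() prefixes the
# first line and pads continuation lines with spaces of the same width. The
# text and column width below are arbitrary.
def _wrap_example():
    text = 'the quick brown fox jumps over the lazy dog'
    return wrap('# ', text, 25)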
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
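# Illustrative sketch, not part of the original module: dedent() measures the
# indentation of the first non-blank line and strips up to that many leading
# spaces from every line, preserving deeper relative indentation.
def _dedent_example():
    s = "\n    first line\n        indented more\n    last line"
    # returns 'first line\n    indented more\nlast line'
    return dedent(s)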
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path, fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
# Collect input and output arguments into one bunch
class Bunch:
def __init__(self, **kwds): self.__dict__.update(kwds)
arg = Bunch(recurse=recurse, pattern_list=pattern_list,
return_folders=return_folders, results=[])
def visit(arg, dirname, files):
# Append to arg.results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if arg.return_folders or os.path.isfile(fullname):
for pattern in arg.pattern_list:
if fnmatch.fnmatch(name, pattern):
arg.results.append(fullname)
break
# Block recursion if recursion was disallowed
if not arg.recurse: files[:]=[]
os.path.walk(root, visit, arg)
return arg.results
def get_recursive_filelist(args):
"""
    Recurse through all the files and dirs in *args*, ignoring symbolic
    links, and return the files as a list of strings
"""
files = []
for arg in args:
if os.path.isfile(arg):
files.append(arg)
continue
if os.path.isdir(arg):
newfiles = listFiles(arg, recurse=1, return_folders=1)
files.extend(newfiles)
return [f for f in files if not os.path.islink(f)]
def pieces(seq, num=2):
"Break up the *seq* into *num* tuples"
start = 0
while 1:
item = seq[start:start+num]
if not len(item): break
yield item
start += num
def exception_to_str(s = None):
sh = StringIO.StringIO()
if s is not None: print >>sh, s
traceback.print_exc(file=sh)
return sh.getvalue()
def allequal(seq):
"""
Return *True* if all elements of *seq* compare equal. If *seq* is
0 or 1 length, return *True*
"""
if len(seq)<2: return True
val = seq[0]
for i in xrange(1, len(seq)):
thisval = seq[i]
if thisval != val: return False
return True
def alltrue(seq):
"""
Return *True* if all elements of *seq* evaluate to *True*. If
*seq* is empty, return *False*.
"""
if not len(seq): return False
for val in seq:
if not val: return False
return True
def onetrue(seq):
"""
    Return *True* if one element of *seq* is *True*.  If *seq* is
empty, return *False*.
"""
if not len(seq): return False
for val in seq:
if val: return True
return False
def allpairs(x):
"""
return all possible pairs in sequence *x*
Condensed by Alex Martelli from this thread_ on c.l.python
.. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
"""
return [ (s, f) for i, f in enumerate(x) for s in x[i+1:] ]
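# Illustrative sketch, not part of the original module: each unordered pair
# from the sequence appears exactly once, later element first.
def _allpairs_example():
    # returns [(2, 1), (3, 1), (3, 2)]
    return allpairs([1, 2, 3])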
# python 2.2 dicts don't have pop--but we don't support 2.2 any more
def popd(d, *args):
"""
Should behave like python2.3 :meth:`dict.pop` method; *d* is a
:class:`dict`::
# returns value for key and deletes item; raises a KeyError if key
# is not in dict
val = popd(d, key)
# returns value for key if key exists, else default. Delete key,
# val item if it exists. Will not raise a KeyError
val = popd(d, key, default)
"""
warnings.warn("Use native python dict.pop method", DeprecationWarning)
# warning added 2008/07/22
if len(args)==1:
key = args[0]
val = d[key]
del d[key]
elif len(args)==2:
key, default = args
val = d.get(key, default)
try: del d[key]
except KeyError: pass
return val
class maxdict(dict):
"""
A dictionary with a maximum size; this doesn't override all the
    relevant methods to constrain size, just setitem, so use with
caution
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if len(self)>=self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
dict.__setitem__(self, k, v)
self._killkeys.append(k)
class Stack:
"""
Implement a stack where elements can be pushed on and you can move
back and forth. But no pop. Should mimic home / back / forward
in a browser
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
'return the current element, or None'
if not len(self._elements): return self._default
else: return self._elements[self._pos]
def forward(self):
'move the position forward and return the current element'
N = len(self._elements)
if self._pos<N-1: self._pos += 1
return self()
def back(self):
'move the position back and return the current element'
if self._pos>0: self._pos -= 1
return self()
def push(self, o):
"""
push object onto stack at current position - all elements
occurring later than the current position are discarded
"""
self._elements = self._elements[:self._pos+1]
self._elements.append(o)
self._pos = len(self._elements)-1
return self()
def home(self):
'push the first element onto the top of the stack'
if not len(self._elements): return
self.push(self._elements[0])
return self()
def empty(self):
return len(self._elements)==0
def clear(self):
'empty the stack'
self._pos = -1
self._elements = []
def bubble(self, o):
"""
raise *o* to the top of the stack and return *o*. *o* must be
in the stack
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso==o: bubbles.append(thiso)
else: self.push(thiso)
for thiso in bubbles:
self.push(o)
return o
def remove(self, o):
'remove element *o* from the stack'
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso==o: continue
else: self.push(thiso)
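# Illustrative sketch, not part of the original module: Stack mimics browser
# style home/back/forward navigation; pushing after going back discards the
# forward history.
def _stack_example():
    s = Stack()
    s.push('a')
    s.push('b')
    s.back()       # current element is now 'a'
    s.push('c')    # 'b' is discarded; elements are ['a', 'c']
    return s()     # returns 'c'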
def popall(seq):
'empty a list'
for i in xrange(len(seq)): seq.pop()
def finddir(o, match, case=False):
"""
return all attributes of *o* which match string in match. if case
is True require an exact case match.
"""
if case:
names = [(name,name) for name in dir(o) if is_string_like(name)]
else:
names = [(name.lower(), name) for name in dir(o) if is_string_like(name)]
match = match.lower()
return [orig for name, orig in names if name.find(match)>=0]
def reverse_dict(d):
'reverse the dictionary -- may lose data if values are not unique!'
return dict([(v,k) for k,v in d.items()])
def report_memory(i=0): # argument may go away
'return the memory consumed by process'
pid = os.getpid()
if sys.platform=='sunos5':
a2 = os.popen('ps -p %d -o osz' % pid).readlines()
mem = int(a2[-1].strip())
elif sys.platform.startswith('linux'):
a2 = os.popen('ps -p %d -o rss,sz' % pid).readlines()
mem = int(a2[1].split()[1])
elif sys.platform.startswith('darwin'):
a2 = os.popen('ps -p %d -o rss,vsz' % pid).readlines()
mem = int(a2[1].split()[0])
return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
'make sure *args* are equal len before zipping'
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i+1, len(arg)))
return zip(*args)
def issubclass_safe(x, klass):
'return issubclass(x, klass) and return False on a TypeError'
try:
return issubclass(x, klass)
except TypeError:
return False
class MemoryMonitor:
def __init__(self, nmax=20000):
self._nmax = nmax
self._mem = np.zeros((self._nmax,), np.int32)
self.clear()
def clear(self):
self._n = 0
self._overflow = False
def __call__(self):
mem = report_memory()
if self._n < self._nmax:
self._mem[self._n] = mem
self._n += 1
else:
self._overflow = True
return mem
def report(self, segments=4):
n = self._n
segments = min(n, segments)
dn = int(n/segments)
ii = range(0, n, dn)
ii[-1] = n-1
print
print 'memory report: i, mem, dmem, dmem/nloops'
print 0, self._mem[0]
for i in range(1, len(ii)):
di = ii[i] - ii[i-1]
if di == 0:
continue
dm = self._mem[ii[i]] - self._mem[ii[i-1]]
print '%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
dm, dm / float(di))
if self._overflow:
print "Warning: array size was too small for the number of calls."
def xy(self, i0=0, isub=1):
x = np.arange(i0, self._n, isub)
return x, self._mem[i0:self._n:isub]
def plot(self, i0=0, isub=1, fig=None):
if fig is None:
from pylab import figure, show
fig = figure()
ax = fig.add_subplot(111)
ax.plot(*self.xy(i0, isub))
fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
from types import FrameType
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
if isinstance(step, dict):
for key, val in step.items():
if val is next:
outstream.write("[%s]" % repr(key))
break
if key is next:
outstream.write("[key] = %s" % repr(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write("Examining: %r\n" % (obj,))
recurse(obj, obj, { }, [])
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable.
For example:
    >>> g = Grouper()
>>> g.join('a', 'b')
>>> g.join('b', 'c')
>>> g.join('d', 'e')
>>> list(g)
[['a', 'b', 'c'], ['d', 'e']]
>>> g.joined('a', 'b')
True
>>> g.joined('a', 'c')
True
>>> g.joined('a', 'd')
False
"""
def __init__(self, init=[]):
mapping = self._mapping = {}
for x in init:
mapping[ref(x)] = [ref(x)]
def __contains__(self, item):
return ref(item) in self._mapping
def clean(self):
"""
Clean dead weak references from the dictionary
"""
mapping = self._mapping
for key, val in mapping.items():
if key() is None:
del mapping[key]
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more
arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(ref(a), [ref(a)])
for arg in args:
set_b = mapping.get(ref(arg))
if set_b is None:
set_a.append(ref(arg))
mapping[ref(arg)] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""
Returns True if *a* and *b* are members of the same set.
"""
self.clean()
mapping = self._mapping
try:
return mapping[ref(a)] is mapping[ref(b)]
except KeyError:
return False
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
class Token: pass
token = Token()
        # Mark each group as we come across it by appending a token,
# and don't yield it twice
for group in self._mapping.itervalues():
if not group[-1] is token:
yield [x() for x in group]
group.append(token)
# Cleanup the tokens
for group in self._mapping.itervalues():
if group[-1] is token:
del group[-1]
def get_siblings(self, a):
"""
Returns all of the items joined with *a*, including itself.
"""
self.clean()
siblings = self._mapping.get(ref(a), [ref(a)])
return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
steps = np.floor(steps)
new_length = ((len(a) - 1) * steps) + 1
new_shape = list(a.shape)
new_shape[0] = new_length
result = np.zeros(new_shape, a.dtype)
result[0] = a[0]
a0 = a[0:-1]
a1 = a[1: ]
delta = ((a1 - a0) / steps)
for i in range(1, int(steps)):
result[i::steps] = delta * i + a0
result[steps::steps] = a1
return result
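# Illustrative sketch, not part of the original module: with steps=2 one
# midpoint is inserted into each gap, so [0., 2., 4.] becomes
# [0., 1., 2., 3., 4.]. The float-valued step slicing above relies on the
# NumPy versions contemporary with this module.
def _simple_linear_interpolation_example():
    a = np.array([0., 2., 4.])
    return simple_linear_interpolation(a, 2)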
def recursive_remove(path):
if os.path.isdir(path):
for fname in glob.glob(os.path.join(path, '*')) + glob.glob(os.path.join(path, '.*')):
if os.path.isdir(fname):
recursive_remove(fname)
os.removedirs(fname)
else:
os.remove(fname)
#os.removedirs(path)
else:
os.remove(path)
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
"""
if not len(args):
return ()
if (is_string_like(args[0]) or not iterable(args[0])):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
seqlist[i] = True
if ma.isMA(x):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if ma.isMA(x):
masks.append(~ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
except: #Fixme: put in tuple of possible exceptions?
pass
if len(masks):
mask = reduce(np.logical_and, masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x.take(igood, axis=0)
for i, x in enumerate(margs):
if seqlist[i] and ma.isMA(x):
margs[i] = x.filled()
return margs
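# Illustrative sketch, not part of the original module: rows that are masked
# or non-finite in either argument are dropped from both, and masked arrays
# come back as plain ndarrays.
def _delete_masked_points_example():
    x = ma.array([1., 2., 3., 4.], mask=[0, 0, 1, 0])
    y = np.array([1., np.nan, 3., 4.])
    # returns two arrays, each equal to [1., 4.]
    return delete_masked_points(x, y)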
def unmasked_index_ranges(mask, compressed = True):
'''
Find index ranges where *mask* is *False*.
*mask* will be flattened if it is not already 1-D.
Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
indices for slices of the compressed :class:`numpy.ndarray`
corresponding to each of *N* uninterrupted runs of unmasked
values. If optional argument *compressed* is *False*, it returns
the start and stop indices into the original :class:`numpy.ndarray`,
not the compressed :class:`numpy.ndarray`. Returns *None* if there
are no unmasked values.
Example::
y = ma.array(np.arange(5), mask = [0,0,1,0,0])
ii = unmasked_index_ranges(ma.getmaskarray(y))
# returns array [[0,2,] [2,4,]]
y.compressed()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
# returns array [[0, 2], [3, 5]]
y.filled()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
Prior to the transforms refactoring, this was used to support
masked arrays in Line2D.
'''
mask = mask.reshape(mask.size)
m = np.concatenate(((1,), mask, (1,)))
indices = np.arange(len(mask) + 1)
mdif = m[1:] - m[:-1]
i0 = np.compress(mdif == -1, indices)
i1 = np.compress(mdif == 1, indices)
assert len(i0) == len(i1)
if len(i1) == 0:
return None # Maybe this should be np.zeros((0,2), dtype=int)
if not compressed:
return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
seglengths = i1 - i0
breakpoints = np.cumsum(seglengths)
ic0 = np.concatenate(((0,), breakpoints[:-1]))
ic1 = breakpoints
return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
ls_mapper.update([(ls[1], ls[0]) for ls in _linestyles])
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('less_simple_linear_interpolation has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.less_simple_linear_interpolation( x, y, xi, extrap=extrap )
def isvector(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('isvector has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
    return mlab.isvector(X)
def vector_lengths( X, P=2., axis=None ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('vector_lengths has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
    return mlab.vector_lengths(X, P=P, axis=axis)
def distances_along_curve( X ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('distances_along_curve has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.distances_along_curve( X )
def path_length(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('path_length has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.path_length(X)
def is_closed_polygon(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('is_closed_polygon has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.is_closed_polygon(X)
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('quad2cubic has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y)
if __name__=='__main__':
assert( allequal([1,1,1]) )
assert(not allequal([1,1,0]) )
assert( allequal([]) )
assert( allequal(('a', 'a')))
assert( not allequal(('a', 'b')))
| agpl-3.0 |
ifuding/Kaggle | TCCC/Code/philly/nfold_train.py | 1 | 8951 | from sklearn.model_selection import KFold
from lgb import lgbm_train
# import xgboost as xgb
from functools import reduce
import numpy as np
# from keras_train import keras_train
# import gensim
# from RCNN_Keras import get_word2vec, RCNN_Model
# from RNN_Keras import RNN_Model
from CNN_Keras import CNN_Model, get_word2vec_embedding
from vdcnn import VDCNN_Model
from tensorflow.python.keras.models import Model
# RNN_PARAMS
RCNN_HIDDEN_UNIT = [128, 64]
def nfold_train(train_data, train_label, model_types = None,
stacking = False, valide_data = None, valide_label = None,
test_data = None, train_weight = None, valide_weight = None,
                flags = None, tokenizer = None, scores = None, emb_weight = None):
"""
nfold Training
"""
print("Over all training size:")
print(train_data.shape)
print("Over all label size:")
print(train_label.shape)
fold = flags.nfold
kf = KFold(n_splits=fold, shuffle=False)
# wv_model = gensim.models.Word2Vec.load("wv_model_norm.gensim")
stacking = flags.stacking
stacking_data = None
stacking_label = None
test_preds = None
num_fold = 0
models = []
# if flags.load_wv_model:
# embedding_weight = get_word2vec_embedding(location = flags.input_training_data_path + flags.wv_model_file, \
# tokenizer = tokenizer, nb_words = flags.vocab_size, embed_size = flags.emb_dim, \
# model_type = flags.wv_model_type)
# else:
embedding_weight = emb_weight
for train_index, test_index in kf.split(train_data):
print('fold: %d th train :-)' % (num_fold))
print('Train size: {} Valide size: {}'.format(train_index.shape[0], test_index.shape[0]))
# print(test_index[:100])
# exit(0)
if valide_label is None:
train_part = train_data[train_index]
train_part_label = train_label[train_index]
valide_part = train_data[test_index]
valide_part_label = train_label[test_index]
if train_weight is not None:
train_part_weight = train_weight[train_index]
valide_part_weight = train_weight[test_index]
else:
train_part = train_data
train_part_label = train_label
valide_part = valide_data
valide_part_label = valide_label
if train_weight is not None:
train_part_weight, valide_part_weight = train_weight, valide_weight
onefold_models = []
for model_type in model_types:
if model_type == 'k':
pass
# with tf.device('/cpu:0'):
model = keras_train(train_part, train_part_label, valide_part, valide_part_label, num_fold)
onefold_models.append((model, 'k'))
elif model_type == 'x':
pass
# model = xgb_train(train_part, train_part_label, valide_part, valide_part_label, num_fold)
# onefold_models.append((model, 'x'))
elif model_type == 'l':
model = lgbm_train(train_part, train_part_label, valide_part, valide_part_label, num_fold,
fold)
onefold_models.append((model, 'l'))
elif model_type == 'rcnn':
# model = Create_RCNN(MAX_NUM_WORDS, RNN_EMBEDDING_DIM, 2, LSTM_UNIT, RCNN_HIDDEN_UNIT, wv_model)
model = RCNN_Model(wv_model_file = 'wv_model_norm.gensim', num_classes = 2, context_vector_dim = LSTM_UNIT, \
hidden_dim = RCNN_HIDDEN_UNIT, max_len = MAX_SEQUENCE_LEN)
model.train(train_part, train_part_label, valide_part, valide_part_label)
print(model.model.summary())
onefold_models.append((model, 'rcnn'))
elif model_type == 'rnn':
model = RNN_Model(max_token = MAX_NUM_WORDS, num_classes = 6, context_vector_dim = LSTM_UNIT, \
hidden_dim = RCNN_HIDDEN_UNIT, max_len = MAX_SEQUENCE_LEN, embedding_dim = RNN_EMBEDDING_DIM)
model.train(train_part, train_part_label, valide_part, valide_part_label)
# print(model.model.summary())
onefold_models.append((model, 'rnn'))
elif model_type == 'cnn':
model = CNN_Model(max_token = flags.vocab_size, num_classes = 6, \
context_vector_dim = [int(hn.strip()) for hn in flags.rnn_unit.strip().split(',')], \
hidden_dim = [int(hn.strip()) for hn in flags.full_connect_hn.strip().split(',')], \
max_len = flags.max_seq_len, embedding_dim = flags.emb_dim, tokenizer = tokenizer, \
embedding_weight = embedding_weight, batch_size = flags.batch_size, epochs = flags.epochs, \
filter_size = [int(hn.strip()) for hn in flags.filter_size.strip().split(',')], \
fix_wv_model = flags.fix_wv_model, \
batch_interval = flags.batch_interval, emb_dropout = flags.emb_dropout, \
full_connect_dropout = flags.full_connect_dropout, separate_label_layer = flags.separate_label_layer, \
scores = scores, resnet_hn = flags.resnet_hn, top_k = flags.vdcc_top_k, char_split = flags.char_split,\
kernel_size_list = [int(kernel.strip()) for kernel in flags.kernel_size_list.strip().split(',')],
rnn_input_dropout = flags.rnn_input_dropout, rnn_state_dropout = flags.rnn_state_dropout)
if num_fold == 0:
print(model.model.summary())
model.train(train_part, train_part_label, valide_part, valide_part_label)
if stacking:
model = Model(inputs = model.model.inputs, outputs = model.model.get_layer(name = 'RCNN_CONC').output)
onefold_models.append((model, 'cnn'))
elif model_type == 'vdcnn':
model = VDCNN_Model(num_filters = [int(hn.strip()) for hn in flags.vdcnn_filters.strip().split(',')], \
sequence_max_length = flags.max_seq_len, top_k = flags.vdcc_top_k, embedding_size = flags.emb_dim, \
hidden_dim = [int(hn.strip()) for hn in flags.full_connect_hn.strip().split(',')], \
batch_size = flags.batch_size, dense_dropout = flags.full_connect_dropout, epochs = flags.epochs)
if num_fold == 0:
print(model.model.summary())
model.train(train_part, train_part_label, valide_part, valide_part_label)
onefold_models.append((model, 'cnn'))
if stacking:
valide_pred = [model_eval(model[0], model[1], valide_part) for model in onefold_models]
valide_pred = reduce((lambda x, y: np.c_[x, y]), valide_pred)
test_pred = [model_eval(model[0], model[1], test_data) for model in onefold_models]
test_pred = reduce((lambda x, y: np.c_[x, y]), test_pred)
if stacking_data is None:
stacking_data = valide_pred #np.c_[valide_part, valide_pred]
stacking_label = valide_part_label
test_preds = test_pred
else:
stacking_data = np.append(stacking_data, valide_pred, axis = 0) #np.append(stacking_data, np.c_[valide_part, valide_pred], axis = 0)
stacking_label = np.append(stacking_label, valide_part_label, axis = 0)
test_preds += test_pred
print('stacking_data shape: {0}'.format(stacking_data.shape))
print('stacking_label shape: {0}'.format(stacking_label.shape))
print('stacking test data shape: {0}'.format(test_preds.shape))
models.append(onefold_models[0])
num_fold += 1
if num_fold == flags.ensemble_nfold:
break
if stacking:
test_preds /= flags.ensemble_nfold
# test_data = np.c_[test_data, test_preds]
return models, stacking_data, stacking_label, test_preds
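# Illustrative sketch, not part of the original script: the stacking branch
# above assembles the per-model out-of-fold predictions column-wise with
# reduce + np.c_. The toy arrays below are hypothetical.
def _stacking_concat_example():
    fold_preds = [np.zeros((4, 1)), np.ones((4, 1))]
    # returns an array of shape (4, 2): one column per model
    return reduce(lambda x, y: np.c_[x, y], fold_preds)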
def model_eval(model, model_type, data_frame):
"""
"""
if model_type == 'l':
preds = model.predict(data_frame)
elif model_type == 'k' or model_type == 'LR' or model_type == 'DNN' or model_type == 'rcnn' \
or model_type == 'rnn' or model_type == 'cnn':
preds = model.predict(data_frame, verbose = 2)
elif model_type == 't':
print("ToDO")
elif model_type == 'x':
preds = model.predict(xgb.DMatrix(data_frame), ntree_limit=model.best_ntree_limit)
return preds #.reshape((data_frame.shape[0], -1))
def models_eval(models, data):
preds = None
for (model, model_type) in models:
pred = model_eval(model, model_type, data)
if preds is None:
preds = pred.copy()
else:
preds += pred
preds /= len(models)
return preds | apache-2.0 |
rahul-c1/scikit-learn | sklearn/metrics/tests/test_regression.py | 31 | 3010 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred)
assert_almost_equal(error, 1 - 5. / 2)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
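def test_regression_metrics_perfect_prediction_sketch():
    # Illustrative sketch, not part of the original test suite: when the
    # prediction equals the target, the error metrics are 0 and the scores 1.
    y = np.arange(5.)
    assert_almost_equal(mean_squared_error(y, y), 0.)
    assert_almost_equal(mean_absolute_error(y, y), 0.)
    assert_almost_equal(r2_score(y, y), 1.)
    assert_almost_equal(explained_variance_score(y, y), 1.)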
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2 = _check_reg_targets(y1, y2)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2)
| bsd-3-clause |
pprett/sklearn_pycon2014 | notebooks/fig_code/ML_flow_chart.py | 61 | 4970 | """
Tutorial Diagrams
-----------------
This script plots the flow-charts used in the scikit-learn tutorials.
"""
import numpy as np
import pylab as pl
from matplotlib.patches import Circle, Rectangle, Polygon, Arrow, FancyArrow
def create_base(box_bg = '#CCCCCC',
arrow1 = '#88CCFF',
arrow2 = '#88FF88',
supervised=True):
fig = pl.figure(figsize=(9, 6), facecolor='w')
ax = pl.axes((0, 0, 1, 1),
xticks=[], yticks=[], frameon=False)
ax.set_xlim(0, 9)
ax.set_ylim(0, 6)
patches = [Rectangle((0.3, 3.6), 1.5, 1.8, zorder=1, fc=box_bg),
Rectangle((0.5, 3.8), 1.5, 1.8, zorder=2, fc=box_bg),
Rectangle((0.7, 4.0), 1.5, 1.8, zorder=3, fc=box_bg),
Rectangle((2.9, 3.6), 0.2, 1.8, fc=box_bg),
Rectangle((3.1, 3.8), 0.2, 1.8, fc=box_bg),
Rectangle((3.3, 4.0), 0.2, 1.8, fc=box_bg),
Rectangle((0.3, 0.2), 1.5, 1.8, fc=box_bg),
Rectangle((2.9, 0.2), 0.2, 1.8, fc=box_bg),
Circle((5.5, 3.5), 1.0, fc=box_bg),
Polygon([[5.5, 1.7],
[6.1, 1.1],
[5.5, 0.5],
[4.9, 1.1]], fc=box_bg),
FancyArrow(2.3, 4.6, 0.35, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.75, 4.2, 0.5, -0.2, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(5.5, 2.4, 0, -0.4, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(2.0, 1.1, 0.5, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.3, 1.1, 1.3, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(6.2, 1.1, 0.8, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2)]
if supervised:
patches += [Rectangle((0.3, 2.4), 1.5, 0.5, zorder=1, fc=box_bg),
Rectangle((0.5, 2.6), 1.5, 0.5, zorder=2, fc=box_bg),
Rectangle((0.7, 2.8), 1.5, 0.5, zorder=3, fc=box_bg),
FancyArrow(2.3, 2.9, 2.0, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
Rectangle((7.3, 0.85), 1.5, 0.5, fc=box_bg)]
else:
patches += [Rectangle((7.3, 0.2), 1.5, 1.8, fc=box_bg)]
for p in patches:
ax.add_patch(p)
pl.text(1.45, 4.9, "Training\nText,\nDocuments,\nImages,\netc.",
ha='center', va='center', fontsize=14)
pl.text(3.6, 4.9, "Feature\nVectors",
ha='left', va='center', fontsize=14)
pl.text(5.5, 3.5, "Machine\nLearning\nAlgorithm",
ha='center', va='center', fontsize=14)
pl.text(1.05, 1.1, "New Text,\nDocument,\nImage,\netc.",
ha='center', va='center', fontsize=14)
pl.text(3.3, 1.7, "Feature\nVector",
ha='left', va='center', fontsize=14)
pl.text(5.5, 1.1, "Predictive\nModel",
ha='center', va='center', fontsize=12)
if supervised:
pl.text(1.45, 3.05, "Labels",
ha='center', va='center', fontsize=14)
pl.text(8.05, 1.1, "Expected\nLabel",
ha='center', va='center', fontsize=14)
pl.text(8.8, 5.8, "Supervised Learning Model",
ha='right', va='top', fontsize=18)
else:
pl.text(8.05, 1.1,
"Likelihood\nor Cluster ID\nor Better\nRepresentation",
ha='center', va='center', fontsize=12)
pl.text(8.8, 5.8, "Unsupervised Learning Model",
ha='right', va='top', fontsize=18)
def plot_supervised_chart(annotate=False):
create_base(supervised=True)
if annotate:
fontdict = dict(color='r', weight='bold', size=14)
pl.text(1.9, 4.55, 'X = vec.fit_transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(3.7, 3.2, 'clf.fit(X, y)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(1.7, 1.5, 'X_new = vec.transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(6.1, 1.5, 'y_new = clf.predict(X_new)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
def plot_unsupervised_chart():
create_base(supervised=False)
if __name__ == '__main__':
plot_supervised_chart(False)
plot_supervised_chart(True)
plot_unsupervised_chart()
pl.show()
| bsd-3-clause |
emon10005/scikit-image | doc/examples/plot_ihc_color_separation.py | 18 | 1925 | """
==============================================
Immunohistochemical staining colors separation
==============================================
In this example we separate the immunohistochemical (IHC) staining from the
hematoxylin counterstaining. The separation is achieved with the method
described in [1]_, known as "color deconvolution".
The IHC staining expression of the FHL2 protein is here revealed with
Diaminobenzidine (DAB) which gives a brown color.
.. [1] A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical
staining by color deconvolution.," Analytical and quantitative
cytology and histology / the International Academy of Cytology [and]
American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.color import rgb2hed
ihc_rgb = data.immunohistochemistry()
ihc_hed = rgb2hed(ihc_rgb)
fig, axes = plt.subplots(2, 2, figsize=(7, 6))
ax0, ax1, ax2, ax3 = axes.ravel()
ax0.imshow(ihc_rgb)
ax0.set_title("Original image")
ax1.imshow(ihc_hed[:, :, 0], cmap=plt.cm.gray)
ax1.set_title("Hematoxylin")
ax2.imshow(ihc_hed[:, :, 1], cmap=plt.cm.gray)
ax2.set_title("Eosin")
ax3.imshow(ihc_hed[:, :, 2], cmap=plt.cm.gray)
ax3.set_title("DAB")
for ax in axes.ravel():
ax.axis('off')
fig.subplots_adjust(hspace=0.3)
"""
.. image:: PLOT2RST.current_figure
Now we can easily manipulate the hematoxylin and DAB "channels":
"""
import numpy as np
from skimage.exposure import rescale_intensity
# Rescale hematoxylin and DAB signals and give them a fluorescence look
h = rescale_intensity(ihc_hed[:, :, 0], out_range=(0, 1))
d = rescale_intensity(ihc_hed[:, :, 2], out_range=(0, 1))
zdh = np.dstack((np.zeros_like(h), d, h))
fig, ax = plt.subplots()
ax.imshow(zdh)
ax.set_title("Stain separated image (rescaled)")
ax.axis('off')
plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
| bsd-3-clause |
sjsrey/pysal_core | pysal_core/io/geotable/dbf.py | 2 | 6681 | """miscellaneous file manipulation utilities
"""
import numpy as np
import pandas as pd
from ..FileIO import FileIO as ps_open
def check_dups(li):
"""checks duplicates in list of ID values
ID values must be read in as a list
__author__ = "Luc Anselin <[email protected]> "
Arguments
---------
li : list of ID values
Returns
-------
a list with the duplicate IDs
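    Example
    -------
    A small sketch of the intended behaviour (the ID values are made up;
    ``sorted`` is used only to make the output deterministic):
    >>> sorted(check_dups([1, 2, 2, 3, 3, 3]))
    [2, 3]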
"""
return list(set([x for x in li if li.count(x) > 1]))
def dbfdups(dbfpath,idvar):
"""checks duplicates in a dBase file
ID variable must be specified correctly
__author__ = "Luc Anselin <[email protected]> "
Arguments
---------
dbfpath : file path to dBase file
idvar : ID variable in dBase file
Returns
-------
a list with the duplicate IDs
"""
db = ps_open(dbfpath,'r')
li = db.by_col(idvar)
return list(set([x for x in li if li.count(x) > 1]))
def df2dbf(df, dbf_path, my_specs=None):
'''
Convert a pandas.DataFrame into a dbf.
__author__ = "Dani Arribas-Bel <[email protected]>, Luc Anselin <[email protected]>"
...
Arguments
---------
df : DataFrame
Pandas dataframe object to be entirely written out to a dbf
dbf_path : str
Path to the output dbf. It is also returned by the function
my_specs : list
List with the field_specs to use for each column.
Defaults to None and applies the following scheme:
                * int: ('N', 20, 0) - for all ints
                * float: ('N', 36, 15) - for all floats
                * str: ('C', 14, 0) - for string, object and category
with all variants for different type sizes
    Note: use of dtypes.name may not be fully robust, but the preferred approach of
    using isinstance seems too clumsy
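    Example
    -------
    A minimal, hypothetical sketch (``df`` is any pandas DataFrame and
    ``'out.dbf'`` an output path of your choosing)::
        df2dbf(df, 'out.dbf')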
'''
if my_specs:
specs = my_specs
else:
"""
type2spec = {int: ('N', 20, 0),
np.int64: ('N', 20, 0),
np.int32: ('N', 20, 0),
np.int16: ('N', 20, 0),
np.int8: ('N', 20, 0),
float: ('N', 36, 15),
np.float64: ('N', 36, 15),
np.float32: ('N', 36, 15),
str: ('C', 14, 0)
}
types = [type(df[i].iloc[0]) for i in df.columns]
"""
# new approach using dtypes.name to avoid numpy name issue in type
type2spec = {'int': ('N', 20, 0),
'int8': ('N', 20, 0),
'int16': ('N', 20, 0),
'int32': ('N', 20, 0),
'int64': ('N', 20, 0),
'float': ('N', 36, 15),
'float32': ('N', 36, 15),
'float64': ('N', 36, 15),
'str': ('C', 14, 0),
'object': ('C', 14, 0),
'category': ('C', 14, 0)
}
types = [df[i].dtypes.name for i in df.columns]
specs = [type2spec[t] for t in types]
db = ps_open(dbf_path, 'w')
db.header = list(df.columns)
db.field_spec = specs
for i, row in df.T.iteritems():
db.write(row)
db.close()
return dbf_path
def dbf2df(dbf_path, index=None, cols=False, incl_index=False):
'''
Read a dbf file as a pandas.DataFrame, optionally selecting the index
variable and which columns are to be loaded.
__author__ = "Dani Arribas-Bel <[email protected]> "
...
Arguments
---------
dbf_path : str
Path to the DBF file to be read
index : str
Name of the column to be used as the index of the DataFrame
cols : list
List with the names of the columns to be read into the
DataFrame. Defaults to False, which reads the whole dbf
incl_index : Boolean
If True index is included in the DataFrame as a
column too. Defaults to False
Returns
-------
df : DataFrame
pandas.DataFrame object created
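    Example
    -------
    A hypothetical sketch (the file name and column names are made up)::
        df = dbf2df('tracts.dbf', index='TRACT_ID', cols=['POP', 'AREA'])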
'''
db = ps_open(dbf_path)
if cols:
if incl_index:
cols.append(index)
vars_to_read = cols
else:
vars_to_read = db.header
data = dict([(var, db.by_col(var)) for var in vars_to_read])
if index:
index = db.by_col(index)
db.close()
return pd.DataFrame(data, index=index, columns=vars_to_read)
else:
db.close()
return pd.DataFrame(data,columns=vars_to_read)
def dbfjoin(dbf1_path,dbf2_path,out_path,joinkey1,joinkey2):
'''
Wrapper function to merge two dbf files into a new dbf file.
__author__ = "Luc Anselin <[email protected]> "
Uses dbf2df and df2dbf to read and write the dbf files into a pandas
DataFrame. Uses all default settings for dbf2df and df2dbf (see docs
for specifics).
...
Arguments
---------
dbf1_path : str
Path to the first (left) dbf file
dbf2_path : str
Path to the second (right) dbf file
out_path : str
Path to the output dbf file (returned by the function)
joinkey1 : str
Variable name for the key in the first dbf. Must be specified.
Key must take unique values.
joinkey2 : str
Variable name for the key in the second dbf. Must be specified.
Key must take unique values.
Returns
-------
dbfpath : path to output file
'''
df1 = dbf2df(dbf1_path,index=joinkey1)
df2 = dbf2df(dbf2_path,index=joinkey2)
dfbig = pd.merge(df1,df2,left_on=joinkey1,right_on=joinkey2,sort=False)
dp = df2dbf(dfbig,out_path)
return dp
def dta2dbf(dta_path,dbf_path):
"""
Wrapper function to convert a stata dta file into a dbf file.
__author__ = "Luc Anselin <[email protected]> "
Uses df2dbf to write the dbf files from a pandas
DataFrame. Uses all default settings for df2dbf (see docs
for specifics).
...
Arguments
---------
dta_path : str
Path to the Stata dta file
dbf_path : str
Path to the output dbf file
Returns
-------
dbf_path : path to output file
"""
db = pd.read_stata(dta_path)
dp = df2dbf(db,dbf_path)
return dp
| bsd-3-clause |
thunderhoser/GewitterGefahr | gewittergefahr/interpretation_paper_2019/make_permutation_figure.py | 1 | 6319 | """Makes figure with results of all 4 permutation tests."""
import argparse
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as pyplot
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.deep_learning import permutation_utils
from gewittergefahr.plotting import plotting_utils
from gewittergefahr.plotting import permutation_plotting
FIGURE_RESOLUTION_DPI = 300
FORWARD_FILE_ARG_NAME = 'forward_test_file_name'
BACKWARDS_FILE_ARG_NAME = 'backwards_test_file_name'
NUM_PREDICTORS_ARG_NAME = 'num_predictors'
CONFIDENCE_LEVEL_ARG_NAME = 'confidence_level'
OUTPUT_FILE_ARG_NAME = 'output_file_name'
FORWARD_FILE_HELP_STRING = (
'Path to file with results of forward (both single- and multi-pass) '
'permutation tests. Will be read by `permutation_utils.read_results`.'
)
BACKWARDS_FILE_HELP_STRING = (
'Same as `{0:s}` but for backwards tests.'
).format(FORWARD_FILE_ARG_NAME)
NUM_PREDICTORS_HELP_STRING = (
'Will plot the K most important predictors for each test, where K = '
'`{0:s}`. If you want to plot all predictors, leave this argument alone.'
).format(NUM_PREDICTORS_ARG_NAME)
CONFIDENCE_LEVEL_HELP_STRING = (
'Confidence level for error bars (in range 0...1).'
)
OUTPUT_FILE_HELP_STRING = (
'Path to output file (figure will be saved here).'
)
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + FORWARD_FILE_ARG_NAME, type=str, required=True,
help=FORWARD_FILE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + BACKWARDS_FILE_ARG_NAME, type=str, required=True,
help=BACKWARDS_FILE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + NUM_PREDICTORS_ARG_NAME, type=int, required=False, default=-1,
help=NUM_PREDICTORS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + CONFIDENCE_LEVEL_ARG_NAME, type=float, required=False, default=0.95,
help=CONFIDENCE_LEVEL_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_FILE_ARG_NAME, type=str, required=True,
help=OUTPUT_FILE_HELP_STRING
)
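# Example invocation (a sketch only; every file path below is hypothetical):
#
#     python make_permutation_figure.py \
#         --forward_test_file_name=forward_results.p \
#         --backwards_test_file_name=backwards_results.p \
#         --num_predictors=20 \
#         --confidence_level=0.95 \
#         --output_file_name=permutation_figure.jpg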
def _run(forward_test_file_name, backwards_test_file_name, num_predictors,
confidence_level, output_file_name):
"""Makes figure with results of all 4 permutation tests.
This is effectively the main method.
:param forward_test_file_name: See documentation at top of file.
:param backwards_test_file_name: Same.
:param num_predictors: Same.
:param confidence_level: Same.
:param output_file_name: Same.
"""
if num_predictors <= 0:
num_predictors = None
file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
print('Reading data from: "{0:s}"...'.format(forward_test_file_name))
forward_test_dict = permutation_utils.read_results(forward_test_file_name)
print('Reading data from: "{0:s}"...'.format(backwards_test_file_name))
backwards_test_dict = permutation_utils.read_results(
backwards_test_file_name
)
figure_object, axes_object_matrix = plotting_utils.create_paneled_figure(
num_rows=2, num_columns=2, shared_x_axis=False, shared_y_axis=True,
keep_aspect_ratio=False, horizontal_spacing=0.1, vertical_spacing=0.05
)
permutation_plotting.plot_single_pass_test(
permutation_dict=forward_test_dict,
axes_object=axes_object_matrix[0, 0],
plot_percent_increase=False, confidence_level=confidence_level,
num_predictors_to_plot=num_predictors
)
axes_object_matrix[0, 0].set_title('Forward single-pass test')
axes_object_matrix[0, 0].set_xticks([])
axes_object_matrix[0, 0].set_xlabel('')
plotting_utils.label_axes(
axes_object=axes_object_matrix[0, 0], label_string='(a)',
x_coord_normalized=-0.01, y_coord_normalized=0.925
)
permutation_plotting.plot_multipass_test(
permutation_dict=forward_test_dict,
axes_object=axes_object_matrix[0, 1],
plot_percent_increase=False, confidence_level=confidence_level,
num_predictors_to_plot=num_predictors
)
axes_object_matrix[0, 1].set_title('Forward multi-pass test')
axes_object_matrix[0, 1].set_xticks([])
axes_object_matrix[0, 1].set_xlabel('')
axes_object_matrix[0, 1].set_ylabel('')
plotting_utils.label_axes(
axes_object=axes_object_matrix[0, 1], label_string='(b)',
x_coord_normalized=1.15, y_coord_normalized=0.925
)
permutation_plotting.plot_single_pass_test(
permutation_dict=backwards_test_dict,
axes_object=axes_object_matrix[1, 0],
plot_percent_increase=False, confidence_level=confidence_level,
num_predictors_to_plot=num_predictors
)
axes_object_matrix[1, 0].set_title('Backward single-pass test')
axes_object_matrix[1, 0].set_xlabel('Area under ROC curve (AUC)')
plotting_utils.label_axes(
axes_object=axes_object_matrix[1, 0], label_string='(c)',
x_coord_normalized=-0.01, y_coord_normalized=0.925
)
permutation_plotting.plot_multipass_test(
permutation_dict=backwards_test_dict,
axes_object=axes_object_matrix[1, 1],
plot_percent_increase=False, confidence_level=confidence_level,
num_predictors_to_plot=num_predictors
)
axes_object_matrix[1, 1].set_title('Backward multi-pass test')
axes_object_matrix[1, 1].set_xlabel('Area under ROC curve (AUC)')
axes_object_matrix[1, 1].set_ylabel('')
plotting_utils.label_axes(
axes_object=axes_object_matrix[1, 1], label_string='(d)',
x_coord_normalized=1.15, y_coord_normalized=0.925
)
print('Saving figure to: "{0:s}"...'.format(output_file_name))
figure_object.savefig(
output_file_name, dpi=FIGURE_RESOLUTION_DPI,
pad_inches=0, bbox_inches='tight'
)
pyplot.close(figure_object)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
forward_test_file_name=getattr(INPUT_ARG_OBJECT, FORWARD_FILE_ARG_NAME),
backwards_test_file_name=getattr(
INPUT_ARG_OBJECT, BACKWARDS_FILE_ARG_NAME
),
num_predictors=getattr(INPUT_ARG_OBJECT, NUM_PREDICTORS_ARG_NAME),
confidence_level=getattr(INPUT_ARG_OBJECT, CONFIDENCE_LEVEL_ARG_NAME),
output_file_name=getattr(INPUT_ARG_OBJECT, OUTPUT_FILE_ARG_NAME)
)
| mit |
naritta/numpy | doc/example.py | 81 | 3581 | """This is the docstring for the example.py module. Module names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi'):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
    long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
    And even use a greek symbol like :math:`\omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
    >>> print([x + 3 for x in a])
    [4, 5, 6]
    >>> print("a\n\nb")
a
b
"""
pass
| bsd-3-clause |
mamikonyana/mamikonyana.github.io | static/ml_afternoon/presentation_data/practical_s1/color_points.py | 1 | 1060 | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import argparse
from kmeans import KMeans
from mixture import GaussianMixtureModel
def parse_args(*argument_array):
parser = argparse.ArgumentParser()
parser.add_argument('data_csv')
parser.add_argument('--num-clusters', type=int, default=15)
parser.add_argument('--algorithm', choices=['k-means', 'gmm'],
default='k-means')
args = parser.parse_args(*argument_array)
return args
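# Example invocation (a sketch; 'points.csv' is a hypothetical input file
# with 'X' and 'Y' columns):
#
#     python color_points.py points.csv --algorithm gmm --num-clusters 5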
def main(args):
df = pd.read_csv(args.data_csv)
data = np.array(df[['X', 'Y']])
plt.clf()
plt.scatter(data[:, 0], data[:, 1], s=3, color='blue')
if args.algorithm == 'gmm':
gmm = GaussianMixtureModel(args.num_clusters)
gmm.fit(data)
y = gmm.predict_cluster(data)
else:
km = KMeans(args.num_clusters)
km.fit(data)
y = km.predict(data)
plt.scatter(data[:, 0], data[:, 1], c=y)
plt.show()
if __name__ == '__main__':
args = parse_args()
main(args)
| mit |
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/misc/common.py | 1 | 6176 | """
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from __future__ import division, print_function, absolute_import
from numpy import arange, newaxis, hstack, product, array, fromstring
__all__ = ['central_diff_weights', 'derivative', 'lena', 'ascent', 'face']
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
    derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+ho*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Notes
-----
Can be inaccurate for large number of points.
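    Examples
    --------
    A quick sketch: the standard 3-point first-derivative stencil has
    weights (-1/2, 0, 1/2).
    >>> from scipy.misc import central_diff_weights
    >>> w = central_diff_weights(3)
    >>> # w is approximately [-0.5, 0.0, 0.5]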
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho, ho + 1.0)
x = x[:, newaxis]
X = x ** 0.0
for k in range(1, Np):
X = hstack([X, x ** k])
w = product(arange(1, ndiv + 1), axis=0) * linalg.inv(X)[ndiv]
return w
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the `n`-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which `n`-th derivative is found.
dx : float, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
    Decreasing the step size too much can result in round-off error.
Examples
--------
>>> from scipy.misc import derivative
>>> def f(x):
... return x**3 + x**2
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1, 0, 1]) / 2.0
elif order == 5:
weights = array([1, -8, 0, 8, -1]) / 12.0
elif order == 7:
weights = array([-1, 9, -45, 0, 45, -9, 1]) / 60.0
elif order == 9:
weights = array([3, -32, 168, -672, 0, 672, -168, 32, -3]) / 840.0
else:
weights = central_diff_weights(order, 1)
elif n == 2:
if order == 3:
weights = array([1, -2.0, 1])
elif order == 5:
weights = array([-1, 16, -30, 16, -1]) / 12.0
elif order == 7:
weights = array([2, -27, 270, -490, 270, -27, 2]) / 180.0
elif order == 9:
weights = array([-9, 128, -1008, 8064, -14350, 8064, -1008, 128, -9]) / 5040.0
else:
weights = central_diff_weights(order, 2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k] * func(x0 + (k - ho) * dx, *args)
return val / product((dx,) * n, axis=0)
def lena():
"""
Function that previously returned an example image
.. note:: Removed in 0.17
Parameters
----------
None
Returns
-------
None
Raises
------
RuntimeError
This functionality has been removed due to licensing reasons.
Notes
-----
The image previously returned by this function has an incompatible license
and has been removed from SciPy. Please use `face` or `ascent` instead.
See Also
--------
face, ascent
"""
raise RuntimeError('lena() is no longer included in SciPy, please use '
'ascent() or face() instead')
def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__), 'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
If True return 8-bit grey-scale image, otherwise return a color image
Returns
-------
face : ndarray
        image of a raccoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
255
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = fromstring(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:, :, 0] + 0.71 * face[:, :, 1] + 0.07 * face[:, :, 2]).astype('uint8')
return face
| mit |
jayflo/scikit-learn | examples/classification/plot_lda_qda.py | 164 | 4806 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
| bsd-3-clause |
tomwallis/PsyUtils | psyutils/misc.py | 1 | 6653 | # miscellaneous functions
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import numpy as _np
import psyutils as _pu
import itertools as it
import pandas as pd
def fixation_cross():
"""Return a 256 square numpy array containing a rendering of the
fixation cross recommended in Thaler et al for low dispersion and
microsaccade rate. You could rescale this to the appropriate size (outer
ring should be 0.6 dva in diameter and inner ring 0.2 dva).
Example:
Our stimulus display has 40 pixels per degree of visual angle::
from skimage import transform
sz = round(40 * 0.6)
fixation_cross = transform.resize(pu.misc.fixation_cross(), (sz,sz))
Reference:
Thaler, L., Schütz, A. C., Goodale, M. A., & Gegenfurtner, K. R. (2013)
What is the best fixation target? The effect of target shape on
stability of fixational eye movements. Vision Research, 76(C), 31–42.
"""
outer_rad = 128
inner_rad = int((0.2 / 0.6)*outer_rad) # inner is 0.2
def _draw_oval(radius):
im = _np.ones((radius*2, radius*2))
x = _np.linspace(-radius, radius, num=radius*2)
xx, yy = _np.meshgrid(x, x)
rad_dist = (xx**2 + yy**2)**0.5
im[rad_dist <= radius] = 0
return(im)
im = _draw_oval(outer_rad)
im[outer_rad - inner_rad:outer_rad + inner_rad, :] = 1
im[:, outer_rad - inner_rad:outer_rad + inner_rad] = 1
im[outer_rad-inner_rad:outer_rad+inner_rad,
outer_rad-inner_rad:outer_rad+inner_rad] = _draw_oval(inner_rad)
return(im)
def draw_box(size, channel='r', width=4):
"""Make a box of a given size that can be placed into images to highlight
a region of interest. The middle of the box is transparent (i.e. alpha 0)
to show what's in the region of interest.
Args:
size (tuple or scalar):
the size of the box in pixels; either square if a scalar is passed
or (w, h) from tuple.
channel (string):
specify box colour according to colour channel ('r', 'g', 'b')
width (int):
width of box lines in pixels.
Returns:
a numpy array with shape [size, size, 4].
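    Example:
        A small sketch of typical use (the size and line width are arbitrary)::
            box = draw_box((64, 48), channel='g', width=2)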
"""
if channel == 'r':
chan = 0
elif channel == 'g':
chan = 1
elif channel == 'b':
chan = 2
else:
raise ValueError("don't know what colour channel to use")
w, h = _pu.image.parse_size(size)
box = _np.zeros((h, w, 4))
box[0:h, 0:width, chan] = 1.
box[0:h, -width:, chan] = 1.
box[0:width, 0:w, chan] = 1.
box[-width:, 0:w, chan] = 1.
box[0:h, 0:width, 3] = 1.
box[0:h, -width:, 3] = 1.
box[0:width, 0:w, 3] = 1.
box[-width:, 0:w, 3] = 1.
return(box)
def pix_per_deg(viewing_distance, screen_wh_px, screen_wh_cm,
average_wh=True):
"""Return the number of pixels per degree of visual angle for a given
viewing distance of a screen of some resolution and size.
Note: this assumes a constant viewing distance, so there will be an error
that increases with eccentricity. For example, at a viewing distance of
60 cm, something 30 degrees eccentric will be at a distance of 69 cm
(60 / np.cos(30 * np.pi / 180)), if presented on a flat screen. At that
viewing distance, the number of pixels per degree will be higher (46
compared to 40 for the example monitor below) --- i.e. about a 13
percent size error at 30 degrees.
Args:
viewing_distance (float):
the viewing distance of the screen (screen to subject's eye) in cm.
screen_wh_px (tuple):
the width and height of the screen in pixels.
screen_wh_cm (tuple):
the width and height of the screen in cm.
average_wh (boolean, default True):
if true, computes pix per deg based on the average of the
width and height.
If false, returns a tuple (width, height).
Returns:
float: the number of pixels per degree of visual angle, assuming a
constant distance.
or if average_wh=False, a 2 element numpy array.
Example::
dist = 60
px = (1920, 1080)
cm = (52, 29)
pu.misc.pix_per_deg(60, (1920, 1080), (52, 29))
# gives 40.36 pixels per degree.
"""
wh_px = _np.array(screen_wh_px)
wh_cm = _np.array(screen_wh_cm)
ppd = _np.pi * (wh_px) / _np.arctan(wh_cm / viewing_distance / 2.) / 360.
if average_wh is True:
res = ppd.mean()
elif average_wh is False:
res = ppd
return(res)
def rad_ang(xy):
"""Return radius and polar angle relative to (0, 0)
of given x and y coordinates.
Args:
xy: a tuple of x and y positions.
Returns:
rad, ang: a tuple of radius from centre
and polar angle (radians):
right = 0
top = pi/2
left = pi (or -pi)
bottom = -pi/2
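    Example:
        A quick sketch (a point one unit above the origin)::
            rad, ang = rad_ang((0., 1.))  # rad == 1.0, ang == pi / 2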
"""
x, y = (xy[0], xy[1])
# compute radius and angle of patch centre:
radius = _np.sqrt(x**2 + y**2)
angle = _np.arctan2(y, x)
return(radius, angle)
def xy(radius, angle):
""" returns the x, y coords of a point given a radius
and angle (in radians).
Args:
radius: a float or int specifying the radius
angle: the polar angle in radians.
right = 0
top = pi/2
left = pi (or -pi)
bottom = -pi/2
Returns:
x, y: a tuple of x and y coordinates.
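    Example:
        A quick sketch, the inverse of the ``rad_ang`` example above::
            x, y = xy(1., _np.pi / 2)  # y is 1.0; x is ~0.0 up to float error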
"""
x = radius * _np.cos(angle)
y = radius * _np.sin(angle)
return(x, y)
def expand_grid(data_dict):
""" A port of R's expand.grid function for use with Pandas dataframes.
Taken from:
`http://pandas.pydata.org/pandas-docs/stable/cookbook.html?highlight=expand%20grid`
Args:
data_dict:
a dictionary or ordered dictionary of column names and values.
Returns:
A pandas dataframe with all combinations of the values given.
Examples::
import psyutils as pu
print(pu.misc.expand_grid(
{'height': [60, 70],
'weight': [100, 140, 180],
'sex': ['Male', 'Female']})
from collections import OrderedDict
entries = OrderedDict([('height', [60, 70]),
('weight', [100, 140, 180]),
('sex', ['Male', 'Female'])])
print(pu.misc.expand_grid(entries))
"""
rows = it.product(*data_dict.values())
return pd.DataFrame.from_records(rows, columns=data_dict.keys())
| mit |
ndingwall/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 9 | 21205 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
import pytest
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils._testing import assert_array_equal, assert_array_less
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.base import clone
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble._weight_boosting import _samme_proba
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn.utils._mocking import NoSampleWeightWrapper
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the diabetes dataset and randomly permute it
diabetes = datasets.load_diabetes()
diabetes.data, diabetes.target = shuffle(diabetes.data, diabetes.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator:
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = _samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert np.isfinite(samme_proba).all()
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_oneclass_adaboost_proba():
# Test predict_proba robustness for one class label input.
# In response to issue #7501
# https://github.com/scikit-learn/scikit-learn/issues/7501
y_t = np.ones(len(X))
clf = AdaBoostClassifier().fit(X, y_t)
assert_array_almost_equal(clf.predict_proba(X), np.ones((len(X), 1)))
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_classification_toy(algorithm):
# Check classification on a toy dataset.
clf = AdaBoostClassifier(algorithm=algorithm, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert clf.predict_proba(T).shape == (len(T), 2)
assert clf.decision_function(T).shape == (len(T),)
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert proba.shape[1] == len(classes)
assert clf.decision_function(iris.data).shape[1] == len(classes)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Check we used multiple estimators
assert len(clf.estimators_) > 1
# Check for distinct random states (see issue #7408)
assert (len(set(est.random_state for est in clf.estimators_)) ==
len(clf.estimators_))
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
@pytest.mark.parametrize('loss', ['linear', 'square', 'exponential'])
def test_diabetes(loss):
# Check consistency on dataset diabetes.
reg = AdaBoostRegressor(loss=loss, random_state=0)
reg.fit(diabetes.data, diabetes.target)
score = reg.score(diabetes.data, diabetes.target)
assert score > 0.6
# Check we used multiple estimators
assert len(reg.estimators_) > 1
# Check for distinct random states (see issue #7408)
assert (len(set(est.random_state for est in reg.estimators_)) ==
len(reg.estimators_))
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_staged_predict(algorithm):
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
diabetes_weights = rng.randint(10, size=diabetes.target.shape)
clf = AdaBoostClassifier(algorithm=algorithm, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert len(staged_predictions) == 10
assert_array_almost_equal(predictions, staged_predictions[-1])
assert len(staged_probas) == 10
assert_array_almost_equal(proba, staged_probas[-1])
assert len(staged_scores) == 10
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(diabetes.data, diabetes.target, sample_weight=diabetes_weights)
predictions = clf.predict(diabetes.data)
staged_predictions = [p for p in clf.staged_predict(diabetes.data)]
score = clf.score(diabetes.data, diabetes.target,
sample_weight=diabetes_weights)
staged_scores = [
s for s in clf.staged_score(
diabetes.data, diabetes.target, sample_weight=diabetes_weights)]
assert len(staged_predictions) == 10
assert_array_almost_equal(predictions, staged_predictions[-1])
assert len(staged_scores) == 10
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(diabetes.data, diabetes.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert type(obj2) == obj.__class__
score2 = obj2.score(iris.data, iris.target)
assert score == score2
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(diabetes.data, diabetes.target)
score = obj.score(diabetes.data, diabetes.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert type(obj2) == obj.__class__
score2 = obj2.score(diabetes.data, diabetes.target)
assert score == score2
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert importances.shape[0] == 10
assert (importances[:3, np.newaxis] >= importances[3:]).all()
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super().fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_almost_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_almost_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_almost_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super().fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_almost_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert len(boost.estimator_weights_) == len(boost.estimator_errors_)
def test_multidimensional_X():
"""
Check that the AdaBoost estimators can work with n-dimensional
data matrix
"""
rng = np.random.RandomState(0)
X = rng.randn(50, 3, 3)
yc = rng.choice([0, 1], 50)
yr = rng.randn(50)
boost = AdaBoostClassifier(DummyClassifier(strategy='most_frequent'))
boost.fit(X, yc)
boost.predict(X)
boost.predict_proba(X)
boost = AdaBoostRegressor(DummyRegressor())
boost.fit(X, yr)
boost.predict(X)
@pytest.mark.parametrize("algorithm", ['SAMME', 'SAMME.R'])
def test_adaboostclassifier_without_sample_weight(algorithm):
X, y = iris.data, iris.target
base_estimator = NoSampleWeightWrapper(DummyClassifier())
clf = AdaBoostClassifier(
base_estimator=base_estimator, algorithm=algorithm
)
err_msg = ("{} doesn't support sample_weight"
.format(base_estimator.__class__.__name__))
with pytest.raises(ValueError, match=err_msg):
clf.fit(X, y)
def test_adaboostregressor_sample_weight():
# check that giving weight will have an influence on the error computed
# for a weak learner
rng = np.random.RandomState(42)
X = np.linspace(0, 100, num=1000)
y = (.8 * X + 0.2) + (rng.rand(X.shape[0]) * 0.0001)
X = X.reshape(-1, 1)
# add an arbitrary outlier
X[-1] *= 10
y[-1] = 10000
# random_state=0 ensure that the underlying bootstrap will use the outlier
regr_no_outlier = AdaBoostRegressor(
base_estimator=LinearRegression(), n_estimators=1, random_state=0
)
regr_with_weight = clone(regr_no_outlier)
regr_with_outlier = clone(regr_no_outlier)
# fit 3 models:
# - a model containing the outlier
# - a model without the outlier
# - a model containing the outlier but with a null sample-weight
regr_with_outlier.fit(X, y)
regr_no_outlier.fit(X[:-1], y[:-1])
sample_weight = np.ones_like(y)
sample_weight[-1] = 0
regr_with_weight.fit(X, y, sample_weight=sample_weight)
score_with_outlier = regr_with_outlier.score(X[:-1], y[:-1])
score_no_outlier = regr_no_outlier.score(X[:-1], y[:-1])
score_with_weight = regr_with_weight.score(X[:-1], y[:-1])
assert score_with_outlier < score_no_outlier
assert score_with_outlier < score_with_weight
assert score_no_outlier == pytest.approx(score_with_weight)
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_adaboost_consistent_predict(algorithm):
# check that predict_proba and predict give consistent results
# regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/14084
X_train, X_test, y_train, y_test = train_test_split(
*datasets.load_digits(return_X_y=True), random_state=42
)
model = AdaBoostClassifier(algorithm=algorithm, random_state=42)
model.fit(X_train, y_train)
assert_array_equal(
np.argmax(model.predict_proba(X_test), axis=1),
model.predict(X_test)
)
@pytest.mark.parametrize(
'model, X, y',
[(AdaBoostClassifier(), iris.data, iris.target),
(AdaBoostRegressor(), diabetes.data, diabetes.target)]
)
def test_adaboost_negative_weight_error(model, X, y):
sample_weight = np.ones_like(y)
sample_weight[-1] = -10
err_msg = "sample_weight cannot contain negative weight"
with pytest.raises(ValueError, match=err_msg):
model.fit(X, y, sample_weight=sample_weight)
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/cluster/tests/test_k_means.py | 132 | 25860 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
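# Note: _has_blas_lib checks whether the optimized BLAS configuration reported
# by numpy.distutils lists the given library; it is used below to skip a test
# with a known OpenBLAS multi-process issue (see issue #636).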
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    # check that an error is raised if the precompute_distances flag is set
    # to an unsupported value
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
    # Check that KMeans works well even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
    # check that my_X was de-centered back to the original data
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers which in turn makes the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
| bsd-3-clause |
brguez/TEIBA | src/python/sourceElement_validationSamples.py | 1 | 13301 | #!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print '\n', timeInfo, "****", string, "****"
def subHeader(string):
"""
Display subheader
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, "**", string, "**"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
def genotypes2df(VCFObj):
"""
"""
donorGtList = []
## For each MEI in the VCF
for MEIObj in VCFObj.lineList:
# Create a series of genotype (donorId labeled)
end = (MEIObj.infoDict["BKPB"] if "BKPB" in MEIObj.infoDict else "UNK")
sourceElementId = MEIObj.chrom + ':' + str(MEIObj.pos) + '-' + str(end)
donorGt = pd.Series(MEIObj.genotypesDict, name=sourceElementId)
# Add the series to the list of series
donorGtList.append(donorGt)
## Merge line series into dataframe (row <- donor_ids, columns <- MEI_ids):
df1 = pd.concat(donorGtList, axis=1)
## Transpose dataframe (row <- MEI_ids, columns <- donor_ids)
df2 = df1.transpose()
return df2
def gt2binary(gtString):
"""
"""
genotype = gtString.split(':')[0]
    # A) Homozygous reference (missing genotypes './.' are also treated as non-carriers)
if (genotype == '0') or (genotype == '0|0') or (genotype == '0/0') or (genotype == './.'):
boolean = 0
# B) Heterozygous or homozygous MEI (carrier/no_carrier)
else:
boolean = 1
return boolean
def series2binary(integer):
"""
"""
if (integer > 0):
boolean = 1
else:
boolean = 0
return boolean
def selectDonorSet(nbAbsentSrc, binaryGenotypes):
"""
"""
nbDonors = binaryGenotypes.shape[1]
nbSrcElements = binaryGenotypes.shape[0]
percCovered = 0
accumulatedSeries = pd.Series([0] * nbSrcElements, index=binaryGenotypes.index)
for iteration in range(1, (nbDonors + 1)):
print "** Iteration nb. ", iteration, " **"
bestDonor, newPercCovered, accumulatedSeries = selectBestDonor(nbAbsentSrc, percCovered, accumulatedSeries, binaryGenotypes)
# b)
if (newPercCovered > percCovered):
percCovered = newPercCovered
selectedDonorList.append(bestDonor)
## Discard selected donor from the dataframe;
binaryGenotypes = binaryGenotypes.drop(bestDonor, axis=1)
print "tioo: ", percCovered, len(selectedDonorList), selectedDonorList
# b)
else:
print "Stop! No percentage increase"
break
def selectBestDonor(nbAbsentSrc, percCovered, accumulatedSeries, binaryGenotypes):
"""
"""
# A key per donorId. The value will be the percentage of source elements covered after adding the candidate donors to the list of selected donors.
percCoveredDict = {}
# A key per donorId. The value will be a series containing for each source element its binary status (1: covered by the selected set of donors and 0: not covered )
unionSeriesDict = {}
for donorId in binaryGenotypes:
candidateDonorSeries = binaryGenotypes[donorId]
tmpSeries = accumulatedSeries.add(candidateDonorSeries)
unionSeries = tmpSeries.apply(series2binary)
unionSeriesDict[donorId] = unionSeries
nbCoveredCandidate = candidateDonorSeries.sum() # not needed
nbCoveredAccumulated = unionSeries.sum()
percCoveredDict[donorId] = float(nbCoveredAccumulated)/float(nbAbsentSrc)*100
## Select the donor contributing to the highest percentage of covered source elements
    # Note: if several donors tie, one of them is selected arbitrarily
bestDonor = max(percCoveredDict, key=percCoveredDict.get)
print "bestDonor: ", bestDonor, percCoveredDict[bestDonor]
return(bestDonor, percCoveredDict[bestDonor], unionSeriesDict[bestDonor])
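# Illustrative note (hypothetical data, not executed): for a 3x3 binary matrix
#
#            donorA  donorB  donorC
#   src1          1       0       1
#   src2          0       1       1
#   src3          1       0       0
#
# selectBestDonor would first pick donorA or donorC (each covers 2 of the 3
# source elements); selectDonorSet would then add the complementary donor to
# reach full coverage and stop, since no remaining donor can increase the
# covered percentage further.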
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import formats
import time
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
## Get user's input ##
parser = argparse.ArgumentParser(description="""Compute the allele count of L1 source elements in EOPC-DE donors and select a set of donors maximizing the number of covered source elements.""")
parser.add_argument('sourceElementGt', help='multi-sample VCF file with the source element genotypes')
parser.add_argument('donorMetadata', help='tab-separated donor metadata file (donor id and tumor type/project code)')
parser.add_argument('sourceElementMetadata', help='tab-separated source element metadata file')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
sourceElementGt = args.sourceElementGt
donorMetadata = args.donorMetadata
sourceElementMetadata = args.sourceElementMetadata
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "inputVCF: ", sourceElementGt
print "donorMetadata: ", donorMetadata
print "sourceElementMetadata: ", sourceElementMetadata
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
#### 1. Read donor metadata file
#################################
# Initialize a dictionary with the following structure:
# - dict: key(donorId) -> projectCode
header("1. Read donor metadata file")
metadataFile = open(donorMetadata, 'r')
donorIdProjectCodeDict = {}
for line in metadataFile:
line = line.rstrip('\r\n')
if not line.startswith("#"):
line = line.split('\t')
donorId = line[0]
tumorType = line[9]
donorIdProjectCodeDict[donorId] = tumorType
#print "test: ", donorId, tumorType
#print "donorIdProjectCodeDict: ", donorIdProjectCodeDict
#### 2. Compute the allele count of each source element in EOPC-DE
###################################################################
## EOPC-DE is the tumor type with available samples for the validation of L1 source elements.
# Initialize a dictionary with the following structure:
# - dict1: key(sourceElementId) -> dict2: key1("alleleCount") -> value1(alleleCount value)
# key2("donorIdList") -> list of donor ids containing the insertion
# sourceElementId: chr:beg-end
header("2. Compute the allele count of each source element in EOPC-DE")
VCFObj = formats.VCF()
donorIdList = VCFObj.read_VCF_multiSample(sourceElementGt)
alleleCountsDict = {}
## For each MEI:
for MEIObj in VCFObj.lineList:
end = (MEIObj.infoDict["BKPB"] if "BKPB" in MEIObj.infoDict else "UNK")
sourceElementId = MEIObj.chrom + ':' + str(MEIObj.pos) + '-' + str(end)
print "** source element ** ", sourceElementId
## Initialize source element dictionary
alleleCountsDict[sourceElementId] = {}
alleleCountsDict[sourceElementId]["alleleCount"] = 0
alleleCountsDict[sourceElementId]["donorIdList"] = []
## For each donor:
for donorId, genotypeField in MEIObj.genotypesDict.iteritems():
genotypeFieldList = genotypeField.split(":")
genotype = genotypeFieldList[0]
#print donorId
## Project code available for current donors
if (donorId in donorIdProjectCodeDict):
projectCode = donorIdProjectCodeDict[donorId]
# Select EOPC-DE tumor types
if (projectCode == "EOPC-DE"):
#print "insertion-Gt: ", genotype
## A) Insertion absent in reference genome
if (MEIObj.alt == "<MEI>"):
# a) Heterozygous
if (genotype == "0/1"):
alleleCountsDict[sourceElementId]["alleleCount"] += 1
alleleCountsDict[sourceElementId]["donorIdList"].append(donorId)
# b) Homozygous alternative
elif (genotype == "1/1"):
alleleCountsDict[sourceElementId]["alleleCount"] += 2
alleleCountsDict[sourceElementId]["donorIdList"].append(donorId)
# Note c) possibility would be missing allele (./.)
#print "Insertion absent in reference genome", sourceElementId, donorId, projectCode, alleleCountsDict[sourceElementId]
## B) Insertion in reference genome and absent in donor genome
elif (MEIObj.ref == "<MEI>"):
#print "Insertion in reference genome", donorId, genotype, projectCode
# a) Heterozygous
if (genotype == "0/1"):
alleleCountsDict[sourceElementId]["alleleCount"] += 1
alleleCountsDict[sourceElementId]["donorIdList"].append(donorId)
# b) Homozygous reference
elif (genotype == "0/0"):
alleleCountsDict[sourceElementId]["alleleCount"] += 2
alleleCountsDict[sourceElementId]["donorIdList"].append(donorId)
        # b) Project code not available for current donors (affects only a few donors)
# I don't know why this happens... check later
else:
print "[ERROR] Unknown donor tumor type: ", donorId
#### 3. Make output file containing source element metadata + allele count
##############################################################################
header("3. Make output file containing source element metadata + allele count")
metadataFile = open(sourceElementMetadata, 'r')
# Open output file
outFilePath = outDir + '/sourceElements_alleleCount_EOPCDE.tsv'
outFile = open(outFilePath, 'w')
# Write header:
row = '#cytobandId' + "\t" + 'sourceIdNew' + "\t" + 'sourceIdOld' + "\t" + 'novelty' + "\t" + 'activityStatus' + "\t" + 'alleleCount' + "\t" + 'donorIdList' + "\n"
outFile.write(row)
for line in metadataFile:
line = line.rstrip('\r\n')
if not line.startswith("#"):
line = line.split('\t')
print "line: ", line
cytobandId, sourceIdNew, sourceIdOld, novelty, activityStatus = line
        ## If the source element identifier is inconsistent, report it and skip
        # Problem only affects one element
        if sourceIdNew not in alleleCountsDict:
            print "[ERROR] source element coordinate not found: ", sourceIdNew
            continue
alleleCount = alleleCountsDict[sourceIdNew]["alleleCount"]
donorIdList = (','.join(alleleCountsDict[sourceIdNew]["donorIdList"]) if len(alleleCountsDict[sourceIdNew]["donorIdList"]) > 0 else "-")
row = cytobandId + "\t" + sourceIdNew + "\t" + sourceIdOld + "\t" + novelty + "\t" + activityStatus + "\t" + str(alleleCount) + "\t" + donorIdList + "\n"
outFile.write(row)
#### 4. Make genotyping binary matrix for EOPC-DE donors
##########################################################
# The binary matrix will only contain those source elements
# that are absent in the reference genome
## carrier: 1, no_carrier: 0
header("4. Make genotyping binary matrix for EOPC-DE donors")
#### Select MEI absent in the reference genome
VCFAbsentObj = formats.VCF()
## For each MEI:
for MEIObj in VCFObj.lineList:
if (MEIObj.alt == "<MEI>"):
VCFAbsentObj.addLine(MEIObj)
#### Make binary matrix for all the donors
gtAbsentDfPCAWG = genotypes2df(VCFAbsentObj)
gtAbsentBinaryDfPCAWG = gtAbsentDfPCAWG.applymap(gt2binary)
#### Filter binary matrix selecting EOPC-DE donors
# print "gtBinaryDfPCAWG: ", gtBinaryDfPCAWG
## Make list with EOPC donor ids
EOPCdonorIdList = []
for donorId in donorIdProjectCodeDict:
projectCode = donorIdProjectCodeDict[donorId]
# Select EOPC-DE tumor types
if (projectCode == "EOPC-DE"):
EOPCdonorIdList.append(donorId)
binaryGtAbsentSrcEOPCdf = gtAbsentBinaryDfPCAWG[EOPCdonorIdList]
#### 5. Find the collection of donors maximizing the number of
################################################################
# source elements absent in the reference genome
##################################################
header("5. Find the collection of donors maximizing the number of source elements absent in the reference genome")
nbAbsentSrc = len(VCFAbsentObj.lineList)
selectedDonorList = []
print "nbAbsentSrc: ", nbAbsentSrc
selectDonorSet(nbAbsentSrc, binaryGtAbsentSrcEOPCdf)
#### 6. Report the number of source L1 in each EOPC
####################################################
# Open output file
outFilePath = outDir + '/donorId_nbSourceL1_EOPCDE.tsv'
outFile = open(outFilePath, 'w')
for donorId in binaryGtAbsentSrcEOPCdf:
sourceL1list = [ sourceId for sourceId, active in binaryGtAbsentSrcEOPCdf[donorId].iteritems() if active == 1]
nbSourceL1 = len(sourceL1list)
sourceL1Str = ",".join(sourceL1list)
row = donorId + "\t" + str(nbSourceL1) + "\t" + sourceL1Str + "\n"
outFile.write(row)
# medianNVHom = np.median([float(genotype[1]) for genotype in genotypesList if genotype[0] == '1/1'])
#binaryGtAbsentSrcEOPCdf = binaryGtAbsentSrcEOPCdf
#print "binaryGtAbsentSrcEOPCdf: ", binaryGtAbsentSrcEOPCdf
####
header("Finished")
| gpl-3.0 |
glemaitre/UnbalancedDataset | imblearn/tests/test_pipeline.py | 2 | 34905 | """
Test the pipeline module.
"""
# Authors: Guillaume Lemaitre <[email protected]>
# Christos Aridas
# License: MIT
from tempfile import mkdtemp
import shutil
import time
import numpy as np
from pytest import raises
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_allclose
from sklearn.base import clone, BaseEstimator
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.datasets import load_iris, make_classification
from sklearn.preprocessing import StandardScaler
from sklearn.externals.joblib import Memory
from imblearn.pipeline import Pipeline, make_pipeline
from imblearn.under_sampling import (RandomUnderSampler,
EditedNearestNeighbours as ENN)
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
R_TOL = 1e-4
class NoFit(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class NoTrans(NoFit):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class NoInvTransf(NoTrans):
def transform(self, X, y=None):
return X
class Transf(NoInvTransf):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class TransfFitParams(Transf):
def fit(self, X, y, **fit_params):
self.fit_params = fit_params
return self
class Mult(BaseEstimator):
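    """Estimator that multiplies its input by a constant factor; it makes the
    arithmetic of pipeline transform/predict calls easy to check in the tests
    below."""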
def __init__(self, mult=1):
self.mult = mult
def fit(self, X, y):
return self
def transform(self, X):
return np.asarray(X) * self.mult
def inverse_transform(self, X):
return np.asarray(X) / self.mult
def predict(self, X):
return (np.asarray(X) * self.mult).sum(axis=1)
predict_proba = predict_log_proba = decision_function = predict
def score(self, X, y=None):
return np.sum(X)
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def fit_predict(self, X, y, should_succeed=False):
self.fit(X, y, should_succeed=should_succeed)
return self.predict(X)
def score(self, X, y=None, sample_weight=None):
if sample_weight is not None:
X = X * sample_weight
return np.sum(X)
class DummyTransf(Transf):
"""Transformer which store the column means"""
def fit(self, X, y):
self.means_ = np.mean(X, axis=0)
# store timestamp to figure out whether the result of 'fit' has been
# cached or not
self.timestamp_ = time.time()
return self
class DummySampler(NoTrans):
"""Samplers which returns a balanced number of samples"""
def fit(self, X, y):
self.means_ = np.mean(X, axis=0)
# store timestamp to figure out whether the result of 'fit' has been
# cached or not
self.timestamp_ = time.time()
return self
def sample(self, X, y):
return X, y
def fit_sample(self, X, y):
return self.fit(X, y).sample(X, y)
class FitTransformSample(NoTrans):
"""Estimator implementing both transform and sample
"""
def fit(self, X, y, should_succeed=False):
pass
def sample(self, X, y=None):
return X, y
def transform(self, X, y=None):
return X
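# Note: FitTransformSample deliberately implements both transform and sample;
# the pipeline is expected to reject such ambiguous steps with a TypeError
# (see test_pipeline_with_step_that_implements_both_sample_and_transform).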
def test_pipeline_init():
# Test the various init parameters of the pipeline.
with raises(TypeError):
Pipeline()
# Check that we can't instantiate pipelines with objects without fit
# method
error_regex = 'Last step of Pipeline should implement fit. .*NoFit.*'
with raises(TypeError, match=error_regex):
Pipeline([('clf', NoFit())])
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([('svc', clf)])
expected = dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False))
assert pipe.get_params(deep=True) == expected
# Check that params are set
pipe.set_params(svc__a=0.1)
assert clf.a == 0.1
assert clf.b is None
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't instantiate with non-transformers on the way
# Note that NoTrans implements fit, but not transform
error_regex = 'implement fit and transform or sample'
with raises(TypeError, match=error_regex):
Pipeline([('t', NoTrans()), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert clf.C == 0.1
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
with raises(ValueError):
pipe.set_params(anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert not pipe.named_steps['svc'] is pipe2.named_steps['svc']
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert params == params2
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert pipe.predict(None)
# and transformer params should not be changed
assert pipe.named_steps['transf'].a is None
assert pipe.named_steps['transf'].b is None
# invalid parameters should raise an error message
with raises(TypeError, match="unexpected keyword argument"):
pipe.fit(None, None, clf__bad=True)
def test_pipeline_sample_weight_supported():
# Pipeline should pass sample_weight
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X, y=None)
assert pipe.score(X) == 3
assert pipe.score(X, y=None) == 3
assert pipe.score(X, y=None, sample_weight=None) == 3
assert pipe.score(X, sample_weight=np.array([2, 3])) == 8
def test_pipeline_sample_weight_unsupported():
# When sample_weight is None it shouldn't be passed
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', Mult())])
pipe.fit(X, y=None)
assert pipe.score(X) == 3
assert pipe.score(X, sample_weight=None) == 3
with raises(TypeError, match="unexpected keyword argument"):
pipe.score(X, sample_weight=np.array([2, 3]))
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
with raises(ValueError, match="Invalid parameter"):
pipe.set_params(fake='nope')
# nested model check
with raises(ValueError, match="Invalid parameter"):
pipe.set_params(fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert predict.shape == (n_samples,)
proba = pipe.predict_proba(X)
assert proba.shape == (n_samples, n_classes)
log_proba = pipe.predict_log_proba(X)
assert log_proba.shape == (n_samples, n_classes)
decision_function = pipe.decision_function(X)
assert decision_function.shape == (n_samples, n_classes)
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# As pipeline doesn't clone estimators on construction,
# it must have its own estimators
scaler_for_pipeline = StandardScaler()
km_for_pipeline = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([
('scaler', scaler_for_pipeline),
('Kmeans', km_for_pipeline)
])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
error_regex = "'PCA' object has no attribute 'fit_predict'"
with raises(AttributeError, match=error_regex):
getattr(pipe, 'fit_predict')
def test_fit_predict_with_intermediate_fit_params():
# tests that Pipeline passes fit_params to intermediate steps
# when fit_predict is invoked
pipe = Pipeline([('transf', TransfFitParams()), ('clf', FitParamT())])
pipe.fit_predict(X=None,
y=None,
transf__should_get_this=True,
clf__should_succeed=True)
assert pipe.named_steps['transf'].fit_params['should_get_this']
assert pipe.named_steps['clf'].successful
assert 'should_succeed' not in pipe.named_steps['transf'].fit_params
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transf = Transf()
pipeline = Pipeline([('mock', transf)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transf.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_set_pipeline_steps():
transf1 = Transf()
transf2 = Transf()
pipeline = Pipeline([('mock', transf1)])
assert pipeline.named_steps['mock'] is transf1
# Directly setting attr
pipeline.steps = [('mock2', transf2)]
assert 'mock' not in pipeline.named_steps
assert pipeline.named_steps['mock2'] is transf2
assert [('mock2', transf2)] == pipeline.steps
# Using set_params
pipeline.set_params(steps=[('mock', transf1)])
assert [('mock', transf1)] == pipeline.steps
# Using set_params to replace single step
pipeline.set_params(mock=transf2)
assert [('mock', transf2)] == pipeline.steps
# With invalid data
pipeline.set_params(steps=[('junk', ())])
with raises(TypeError):
pipeline.fit([[1]], [1])
with raises(TypeError):
pipeline.fit_transform([[1]], [1])
def test_set_pipeline_step_none():
# Test setting Pipeline steps to None
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
def make():
return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)])
pipeline = make()
exp = 2 * 3 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline.set_params(m3=None)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
expected_params = {'steps': pipeline.steps,
'm2': mult2,
'm3': None,
'last': mult5,
'memory': None,
'm2__mult': 2,
'last__mult': 5}
assert pipeline.get_params(deep=True) == expected_params
pipeline.set_params(m2=None)
exp = 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
# for other methods, ensure no AttributeErrors on None:
other_methods = ['predict_proba', 'predict_log_proba',
'decision_function', 'transform', 'score']
for method in other_methods:
getattr(pipeline, method)(X)
pipeline.set_params(m2=mult2)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline = make()
pipeline.set_params(last=None)
# mult2 and mult3 are active
exp = 6
pipeline.fit(X, y)
pipeline.transform(X)
assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
with raises(AttributeError, match="has no attribute 'predict'"):
getattr(pipeline, 'predict')
# Check None step at construction time
exp = 2 * 5
pipeline = Pipeline([('m2', mult2), ('m3', None), ('last', mult5)])
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
def test_pipeline_ducktyping():
pipeline = make_pipeline(Mult(5))
pipeline.predict
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf())
assert not hasattr(pipeline, 'predict')
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(None)
assert not hasattr(pipeline, 'predict')
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf(), NoInvTransf())
assert not hasattr(pipeline, 'predict')
pipeline.transform
assert not hasattr(pipeline, 'inverse_transform')
pipeline = make_pipeline(NoInvTransf(), Transf())
assert not hasattr(pipeline, 'predict')
pipeline.transform
assert not hasattr(pipeline, 'inverse_transform')
def test_make_pipeline():
t1 = Transf()
t2 = Transf()
pipe = make_pipeline(t1, t2)
assert isinstance(pipe, Pipeline)
assert pipe.steps[0][0] == "transf-1"
assert pipe.steps[1][0] == "transf-2"
pipe = make_pipeline(t1, t2, FitParamT())
assert isinstance(pipe, Pipeline)
assert pipe.steps[0][0] == "transf-1"
assert pipe.steps[1][0] == "transf-2"
assert pipe.steps[2][0] == "fitparamt"
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
with raises(AttributeError):
getattr(reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
with raises(AttributeError):
getattr(clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_pipeline_wrong_memory():
# Test that an error is raised when memory is not a string or a Memory
# instance
iris = load_iris()
X = iris.data
y = iris.target
# Define memory as an integer
memory = 1
cached_pipe = Pipeline([('transf', DummyTransf()), ('svc', SVC())],
memory=memory)
error_regex = ("'memory' should either be a string or a joblib.Memory"
" instance, got 'memory=1' instead.")
with raises(ValueError, match=error_regex):
cached_pipe.fit(X, y)
def test_pipeline_memory_transformer():
iris = load_iris()
X = iris.data
y = iris.target
cachedir = mkdtemp()
try:
memory = Memory(cachedir=cachedir, verbose=10)
# Test with Transformer + SVC
clf = SVC(probability=True, random_state=0)
transf = DummyTransf()
pipe = Pipeline([('transf', clone(transf)), ('svc', clf)])
cached_pipe = Pipeline([('transf', transf), ('svc', clf)],
memory=memory)
# Memoize the transformer at the first fit
cached_pipe.fit(X, y)
pipe.fit(X, y)
        # Get the time stamp of the transformer in the cached pipeline
expected_ts = cached_pipe.named_steps['transf'].timestamp_
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert not hasattr(transf, 'means_')
# Check that we are reading the cache while fitting
# a second time
cached_pipe.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert cached_pipe.named_steps['transf'].timestamp_ == expected_ts
# Create a new pipeline with cloned estimators
# Check that even changing the name step does not affect the cache hit
clf_2 = SVC(probability=True, random_state=0)
transf_2 = DummyTransf()
cached_pipe_2 = Pipeline([('transf_2', transf_2), ('svc', clf_2)],
memory=memory)
cached_pipe_2.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X))
assert_array_equal(pipe.predict_proba(X),
cached_pipe_2.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe_2.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe_2.named_steps['transf_2'].means_)
assert cached_pipe_2.named_steps['transf_2'].timestamp_ == expected_ts
finally:
shutil.rmtree(cachedir)
def test_pipeline_memory_sampler():
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
cachedir = mkdtemp()
try:
memory = Memory(cachedir=cachedir, verbose=10)
# Test with Transformer + SVC
clf = SVC(probability=True, random_state=0)
transf = DummySampler()
pipe = Pipeline([('transf', clone(transf)), ('svc', clf)])
cached_pipe = Pipeline([('transf', transf), ('svc', clf)],
memory=memory)
# Memoize the transformer at the first fit
cached_pipe.fit(X, y)
pipe.fit(X, y)
        # Get the time stamp of the transformer in the cached pipeline
expected_ts = cached_pipe.named_steps['transf'].timestamp_
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert not hasattr(transf, 'means_')
# Check that we are reading the cache while fitting
# a second time
cached_pipe.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert cached_pipe.named_steps['transf'].timestamp_ == expected_ts
# Create a new pipeline with cloned estimators
# Check that even changing the name step does not affect the cache hit
clf_2 = SVC(probability=True, random_state=0)
transf_2 = DummySampler()
cached_pipe_2 = Pipeline([('transf_2', transf_2), ('svc', clf_2)],
memory=memory)
cached_pipe_2.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X))
assert_array_equal(pipe.predict_proba(X),
cached_pipe_2.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe_2.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe_2.named_steps['transf_2'].means_)
assert cached_pipe_2.named_steps['transf_2'].timestamp_ == expected_ts
finally:
shutil.rmtree(cachedir)
def test_pipeline_methods_pca_rus_svm():
# Test the various methods of the pipeline (pca + svm).
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA()
rus = RandomUnderSampler(random_state=0)
pipe = Pipeline([('pca', pca), ('rus', rus), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_rus_pca_svm():
# Test the various methods of the pipeline (pca + svm).
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA()
rus = RandomUnderSampler(random_state=0)
pipe = Pipeline([('rus', rus), ('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_sample():
# Test whether pipeline works with a sampler at the end.
# Also test pipeline.sampler
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
rus = RandomUnderSampler(random_state=0)
pipeline = Pipeline([('rus', rus)])
# test transform and fit_transform:
X_trans, y_trans = pipeline.fit(X, y).sample(X, y)
X_trans2, y_trans2 = pipeline.fit_sample(X, y)
X_trans3, y_trans3 = rus.fit_sample(X, y)
assert_allclose(X_trans, X_trans2, rtol=R_TOL)
assert_allclose(X_trans, X_trans3, rtol=R_TOL)
assert_allclose(y_trans, y_trans2, rtol=R_TOL)
assert_allclose(y_trans, y_trans3, rtol=R_TOL)
pca = PCA()
pipeline = Pipeline([('pca', PCA()),
('rus', rus)])
X_trans, y_trans = pipeline.fit(X, y).sample(X, y)
X_pca = pca.fit_transform(X)
X_trans2, y_trans2 = rus.fit_sample(X_pca, y)
    # We round values that are near zero to exactly zero; PCA seems to have
    # some numerical issue with them
X_trans[np.bitwise_and(X_trans < R_TOL, X_trans > -R_TOL)] = 0
X_trans2[np.bitwise_and(X_trans2 < R_TOL, X_trans2 > -R_TOL)] = 0
assert_allclose(X_trans, X_trans2, rtol=R_TOL)
assert_allclose(y_trans, y_trans2, rtol=R_TOL)
def test_pipeline_sample_transform():
# Test whether pipeline works with a sampler at the end.
# Also test pipeline.sampler
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
rus = RandomUnderSampler(random_state=0)
pca = PCA()
pca2 = PCA()
pipeline = Pipeline([('pca', pca), ('rus', rus), ('pca2', pca2)])
pipeline.fit(X, y).transform(X)
def test_pipeline_none_classifier():
# Test pipeline using None as preprocessing step and a classifier
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
clf = LogisticRegression(random_state=0)
pipe = make_pipeline(None, clf)
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.decision_function(X)
pipe.score(X, y)
def test_pipeline_none_sampler_classifier():
# Test pipeline using None, RUS and a classifier
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
clf = LogisticRegression(random_state=0)
rus = RandomUnderSampler(random_state=0)
pipe = make_pipeline(None, rus, clf)
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.decision_function(X)
pipe.score(X, y)
def test_pipeline_sampler_none_classifier():
# Test pipeline using RUS, None and a classifier
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
clf = LogisticRegression(random_state=0)
rus = RandomUnderSampler(random_state=0)
pipe = make_pipeline(rus, None, clf)
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.decision_function(X)
pipe.score(X, y)
def test_pipeline_none_sampler_sample():
# Test pipeline using None step and a sampler
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
rus = RandomUnderSampler(random_state=0)
pipe = make_pipeline(None, rus)
pipe.fit(X, y)
pipe.sample(X, y)
def test_pipeline_none_transformer():
# Test pipeline using None and a transformer that implements transform and
# inverse_transform
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
pca = PCA(whiten=True)
pipe = make_pipeline(None, pca)
pipe.fit(X, y)
X_trans = pipe.transform(X)
X_inversed = pipe.inverse_transform(X_trans)
assert_array_almost_equal(X, X_inversed)
def test_pipeline_methods_anova_rus():
# Test the various methods of the pipeline (anova).
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
# Test with RandomUnderSampling + Anova + LogisticRegression
clf = LogisticRegression()
rus = RandomUnderSampler(random_state=0)
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('rus', rus), ('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_with_step_that_implements_both_sample_and_transform():
# Test the various methods of the pipeline (anova).
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
clf = LogisticRegression()
with raises(TypeError):
Pipeline([('step', FitTransformSample()), ('logistic', clf)])
def test_pipeline_with_step_that_it_is_pipeline():
# Test the various methods of the pipeline (anova).
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0)
# Test with RandomUnderSampling + Anova + LogisticRegression
clf = LogisticRegression()
rus = RandomUnderSampler(random_state=0)
filter1 = SelectKBest(f_classif, k=2)
pipe1 = Pipeline([('rus', rus), ('anova', filter1)])
with raises(TypeError):
Pipeline([('pipe1', pipe1), ('logistic', clf)])
def test_pipeline_fit_then_sample_with_sampler_last_estimator():
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=50000,
random_state=0)
rus = RandomUnderSampler(random_state=42)
enn = ENN()
pipeline = make_pipeline(rus, enn)
X_fit_sample_resampled, y_fit_sample_resampled = pipeline.fit_sample(X, y)
pipeline = make_pipeline(rus, enn)
pipeline.fit(X, y)
X_fit_then_sample_res, y_fit_then_sample_res = pipeline.sample(X, y)
assert_array_equal(X_fit_sample_resampled, X_fit_then_sample_res)
assert_array_equal(y_fit_sample_resampled, y_fit_then_sample_res)
def test_pipeline_fit_then_sample_3_samplers_with_sampler_last_estimator():
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=50000,
random_state=0)
rus = RandomUnderSampler(random_state=42)
enn = ENN()
pipeline = make_pipeline(rus, enn, rus)
X_fit_sample_resampled, y_fit_sample_resampled = pipeline.fit_sample(X, y)
pipeline = make_pipeline(rus, enn, rus)
pipeline.fit(X, y)
X_fit_then_sample_res, y_fit_then_sample_res = pipeline.sample(X, y)
assert_array_equal(X_fit_sample_resampled, X_fit_then_sample_res)
assert_array_equal(y_fit_sample_resampled, y_fit_then_sample_res)
| mit |
zaxtax/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
fabianp/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimensions (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus, even with no noise, clustering
using this distance will not separate out waveforms 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. the "cityblock" distance) is much smaller than its
l2 norm (the "euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, which characterize the
spread of each class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
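# (Illustrative sketch, not part of the original example.) A quick numeric
# check of the claim above: the cosine distance is unchanged when both
# samples are rescaled, while the euclidean and cityblock distances scale
# with the data. The helper name and the factor of 3 are arbitrary choices.
def _cosine_scale_invariance_check():
    rng = np.random.RandomState(42)
    u = rng.rand(1, 10)
    v = rng.rand(1, 10)
    for metric_name in ("cosine", "euclidean", "cityblock"):
        d = pairwise_distances(u, v, metric=metric_name)[0, 0]
        d_scaled = pairwise_distances(3 * u, 3 * v, metric=metric_name)[0, 0]
        print("%s: d(u, v)=%.3f, d(3u, 3v)=%.3f" % (metric_name, d, d_scaled))
# _cosine_scale_invariance_check()  # uncomment to print the comparison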
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
hsiaoyi0504/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
    by moving the 8x8 images in X around by 1px to the left, right, down and up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
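# (Illustrative check, not part of the original example.) nudge_dataset
# returns five times as many samples: the originals plus one copy shifted
# by a single pixel in each of the four directions. The dummy zero arrays
# below exist only to demonstrate the shape contract.
_X_demo, _Y_demo = nudge_dataset(np.zeros((10, 64)), np.zeros(10))
assert _X_demo.shape == (50, 64) and _Y_demo.shape == (50,)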
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
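# (Illustrative sketch, not part of the original example.) Roughly the kind
# of grid search that could have produced the values above; it is kept
# behind a flag because running it would dominate the example's runtime,
# and the candidate grids below are arbitrary illustrations.
RUN_GRID_SEARCH = False
if RUN_GRID_SEARCH:
    from sklearn.grid_search import GridSearchCV
    param_grid = {'rbm__learning_rate': [0.01, 0.06, 0.1],
                  'rbm__n_components': [50, 100, 200],
                  'logistic__C': [1000.0, 6000.0, 10000.0]}
    search = GridSearchCV(classifier, param_grid, verbose=1)
    search.fit(X_train, Y_train)
    print("Best parameters found: %s" % search.best_params_)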
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/io/tests/test_stata.py | 2 | 44490 | # pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
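        # (Illustrative aside, not part of the original test.) The upcasting
        # in question can be checked directly: an integer column forced to
        # hold np.nan is promoted to float, e.g. pd.Series([1, np.nan]).dtype
        # gives dtype('float64').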
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
        # this is an oddity, as the nan really should be float64, but
        # the casting doesn't fail, so we need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
            # should get a warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
reader = StataReader(path)
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path), InvalidColumnName)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(KeyError,
lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path),
PossiblePrecisionLoss)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified['s1'] = Series(modified['s1'], dtype=np.int16)
modified['s2'] = Series(modified['s2'], dtype=np.int32)
modified['s3'] = Series(modified['s3'], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path, {0: 'tc'}),
InvalidColumnName)
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ['_0']
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_date_export_formats(self):
columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
conversions = dict(((c, c) for c in columns))
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = 'index'
expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1)] # Year
expected = DataFrame([expected_values], columns=columns)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_bool_uint(self):
s0 = Series([0, 1, True], dtype=np.bool)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
's4': s4, 's5': s5, 's6': s6})
original.index.name = 'index'
expected = original.copy()
expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
np.int32, np.float64)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
sr_115 = StataReader(self.dta16_115).variable_labels()
sr_117 = StataReader(self.dta16_117).variable_labels()
keys = ('var1', 'var2', 'var3')
labels = ('label1', 'label2', 'label3')
for k,v in compat.iteritems(sr_115):
self.assertTrue(k in sr_117)
self.assertTrue(v == sr_117[k])
self.assertTrue(k in keys)
self.assertTrue(v in labels)
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
sr = StataReader(path)
typlist = sr.typlist
variables = sr.varlist
formats = sr.fmtlist
for variable, fmt, typ in zip(variables, formats, typlist):
self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
self.assertTrue(int(variable[1:]) == typ)
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.assertRaises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_missing_value_generator(self):
types = ('b','h','l')
df = DataFrame([[0.0]],columns=['float_'])
with tm.ensure_clean() as path:
df.to_stata(path)
valid_range = StataReader(path).VALID_RANGE
expected_values = ['.' + chr(97 + i) for i in range(26)]
expected_values.insert(0, '.')
for t in types:
offset = valid_range[t][1]
for i in range(0,27):
val = StataMissingValue(offset+1+i)
self.assertTrue(val.string == expected_values[i])
# Test extremes for floats
val = StataMissingValue(struct.unpack('<f',b'\x00\x00\x00\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<f',b'\x00\xd0\x00\x7f')[0])
self.assertTrue(val.string == '.z')
# Test extremes for floats
val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
self.assertTrue(val.string == '.z')
def test_missing_value_conversion(self):
columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
smv = StataMissingValue(101)
keys = [key for key in iterkeys(smv.MISSING_VALUES)]
keys.sort()
data = []
for i in range(27):
row = [StataMissingValue(keys[i+(j*27)]) for j in range(5)]
data.append(row)
expected = DataFrame(data,columns=columns)
parsed_113 = read_stata(self.dta17_113, convert_missing=True)
parsed_115 = read_stata(self.dta17_115, convert_missing=True)
parsed_117 = read_stata(self.dta17_117, convert_missing=True)
tm.assert_frame_equal(expected, parsed_113)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
def test_big_dates(self):
yr = [1960, 2000, 9999, 100, 2262, 1677]
mo = [1, 1, 12, 1, 4, 9]
dd = [1, 1, 31, 1, 22, 23]
hr = [0, 0, 23, 0, 0, 0]
mm = [0, 0, 59, 0, 0, 0]
ss = [0, 0, 59, 0, 0, 0]
expected = []
for i in range(len(yr)):
row = []
for j in range(7):
if j == 0:
row.append(
datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
elif j == 6:
row.append(datetime(yr[i], 1, 1))
else:
row.append(datetime(yr[i], mo[i], dd[i]))
expected.append(row)
expected.append([NaT] * 7)
columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
'date_th', 'date_ty']
        # Fixes for weekly, quarterly, half-yearly and yearly dates
expected[2][2] = datetime(9999,12,24)
expected[2][3] = datetime(9999,12,1)
expected[2][4] = datetime(9999,10,1)
expected[2][5] = datetime(9999,7,1)
expected[4][2] = datetime(2262,4,16)
expected[4][3] = expected[4][4] = datetime(2262,4,1)
expected[4][5] = expected[4][6] = datetime(2262,1,1)
expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677,10,1)
expected[5][5] = expected[5][6] = datetime(1678,1,1)
expected = DataFrame(expected, columns=columns, dtype=np.object)
parsed_115 = read_stata(self.dta18_115)
parsed_117 = read_stata(self.dta18_117)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
date_conversion = dict((c, c[-2:]) for c in columns)
#{c : c[-2:] for c in columns}
with tm.ensure_clean() as path:
expected.index.name = 'index'
expected.to_stata(path, date_conversion)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_dtype_conversion(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
no_conversion = read_stata(self.dta15_117,
convert_dates=True)
tm.assert_frame_equal(expected, no_conversion)
conversion = read_stata(self.dta15_117,
convert_dates=True,
preserve_dtypes=False)
# read_csv types are the same
expected = self.read_csv(self.csv15)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
tm.assert_frame_equal(expected, conversion)
def test_drop_column(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
columns = ['byte_', 'int_', 'long_']
expected = expected[columns]
dropped = read_stata(self.dta15_117, convert_dates=True,
columns=columns)
tm.assert_frame_equal(expected, dropped)
with tm.assertRaises(ValueError):
columns = ['byte_', 'byte_']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
with tm.assertRaises(ValueError):
columns = ['byte_', 'int_', 'long_', 'not_found']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
def test_categorical_writing(self):
original = DataFrame.from_records(
[
["one", "ten", "one", "one", "one", 1],
["two", "nine", "two", "two", "two", 2],
["three", "eight", "three", "three", "three", 3],
["four", "seven", 4, "four", "four", 4],
["five", "six", 5, np.nan, "five", 5],
["six", "five", 6, np.nan, "six", 6],
["seven", "four", 7, np.nan, "seven", 7],
["eight", "three", 8, np.nan, "eight", 8],
["nine", "two", 9, np.nan, "nine", 9],
["ten", "one", "ten", np.nan, "ten", 10]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled', 'unlabeled'])
expected = original.copy()
# these are all categoricals
original = pd.concat([original[col].astype('category') for col in original], axis=1)
expected['incompletely_labeled'] = expected['incompletely_labeled'].apply(str)
expected['unlabeled'] = expected['unlabeled'].apply(str)
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
expected.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
# Silence warnings
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), expected)
def test_categorical_warnings_and_errors(self):
# Warning for non-string labels
# Error for labels too long
original = pd.DataFrame.from_records(
[['a' * 10000],
['b' * 10000],
['c' * 10000],
['d' * 10000]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
with tm.ensure_clean() as path:
tm.assertRaises(ValueError, original.to_stata, path)
original = pd.DataFrame.from_records(
[['a'],
['b'],
['c'],
['d'],
[1]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
with warnings.catch_warnings(record=True) as w:
original.to_stata(path)
tm.assert_equal(len(w), 1) # should get a warning for mixed content
def test_categorical_with_stata_missing_values(self):
values = [['a' + str(i)] for i in range(120)]
values.append([np.nan])
original = pd.DataFrame.from_records(values, columns=['many_labels'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
def test_categorical_order(self):
# Directly construct using expected codes
        # Format: is_cat, col_name, labels (in order), underlying data
expected = [(True, 'ordered', ['a', 'b', 'c', 'd', 'e'], np.arange(5)),
(True, 'reverse', ['a', 'b', 'c', 'd', 'e'], np.arange(5)[::-1]),
(True, 'noorder', ['a', 'b', 'c', 'd', 'e'], np.array([2, 1, 4, 0, 3])),
(True, 'floating', ['a', 'b', 'c', 'd', 'e'], np.arange(0, 5)),
(True, 'float_missing', ['a', 'd', 'e'], np.array([0, 1, 2, -1, -1])),
(False, 'nolabel', [1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
(True, 'int32_mixed', ['d', 2, 'e', 'b', 'a'], np.arange(5))]
cols = []
for is_cat, col, labels, codes in expected:
if is_cat:
cols.append((col, pd.Categorical.from_codes(codes, labels)))
else:
cols.append((col, pd.Series(labels, dtype=np.float32)))
expected = DataFrame.from_items(cols)
        # Read with and without categoricals, ensure order is identical
parsed_115 = read_stata(self.dta19_115)
parsed_117 = read_stata(self.dta19_117)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
# Check identity of codes
for col in expected:
if is_categorical_dtype(expected[col]):
tm.assert_series_equal(expected[col].cat.codes,
parsed_115[col].cat.codes)
tm.assert_index_equal(expected[col].cat.categories,
parsed_115[col].cat.categories)
def test_categorical_sorting(self):
parsed_115 = read_stata(self.dta20_115)
parsed_117 = read_stata(self.dta20_117)
# Sort based on codes, not strings
parsed_115 = parsed_115.sort("srh")
parsed_117 = parsed_117.sort("srh")
# Don't sort index
parsed_115.index = np.arange(parsed_115.shape[0])
parsed_117.index = np.arange(parsed_117.shape[0])
codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
expected = pd.Series(pd.Categorical.from_codes(codes=codes,
categories=categories))
tm.assert_series_equal(expected, parsed_115["srh"])
tm.assert_series_equal(expected, parsed_117["srh"])
def test_categorical_ordering(self):
parsed_115 = read_stata(self.dta19_115)
parsed_117 = read_stata(self.dta19_117)
parsed_115_unordered = read_stata(self.dta19_115,
order_categoricals=False)
parsed_117_unordered = read_stata(self.dta19_117,
order_categoricals=False)
for col in parsed_115:
if not is_categorical_dtype(parsed_115[col]):
continue
tm.assert_equal(True, parsed_115[col].cat.ordered)
tm.assert_equal(True, parsed_117[col].cat.ordered)
tm.assert_equal(False, parsed_115_unordered[col].cat.ordered)
tm.assert_equal(False, parsed_117_unordered[col].cat.ordered)
def test_read_chunks_117(self):
files_117 = [self.dta1_117, self.dta2_117, self.dta3_117,
self.dta4_117, self.dta14_117, self.dta15_117,
self.dta16_117, self.dta17_117, self.dta18_117,
self.dta19_117, self.dta20_117]
for fname in files_117:
for chunksize in 1,2:
for convert_categoricals in False, True:
for convert_dates in False, True:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(fname, convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
itr = read_stata(fname, iterator=True)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos:pos+chunksize, :]
try:
tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
except AssertionError:
# datetime.datetime and pandas.tslib.Timestamp may hold
# equivalent values but fail assert_frame_equal
assert(all([x == y for x, y in zip(from_frame, chunk)]))
pos += chunksize
def test_iterator(self):
fname = self.dta3_117
parsed = read_stata(fname)
itr = read_stata(fname, iterator=True)
chunk = itr.read(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
itr = read_stata(fname, chunksize=5)
chunk = list(itr)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])
itr = read_stata(fname, iterator=True)
chunk = itr.get_chunk(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
itr = read_stata(fname, chunksize=5)
chunk = itr.get_chunk()
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
def test_read_chunks_115(self):
files_115 = [self.dta2_115, self.dta3_115, self.dta4_115,
self.dta14_115, self.dta15_115, self.dta16_115,
self.dta17_115, self.dta18_115, self.dta19_115,
self.dta20_115]
for fname in files_115:
for chunksize in 1,2:
for convert_categoricals in False, True:
for convert_dates in False, True:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(fname, convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
itr = read_stata(fname, iterator=True,
convert_categoricals=convert_categoricals)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos:pos+chunksize, :]
try:
tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
except AssertionError:
# datetime.datetime and pandas.tslib.Timestamp may hold
# equivalent values but fail assert_frame_equal
assert(all([x == y for x, y in zip(from_frame, chunk)]))
pos += chunksize
def test_read_chunks_columns(self):
fname = self.dta3_117
columns = ['quarter', 'cpi', 'm1']
chunksize = 2
parsed = read_stata(fname, columns=columns)
itr = read_stata(fname, iterator=True)
pos = 0
for j in range(5):
chunk = itr.read(chunksize, columns=columns)
if chunk is None:
break
from_frame = parsed.iloc[pos:pos+chunksize, :]
tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
pos += chunksize
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
0asa/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 40 | 8143 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
"""Histogram kernel implemented as a callable."""
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed, [])
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
#X_pred2 = kpca.inverse_transform(X_pred_transformed)
#assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
    # fit only the first four components: the fifth has a near-zero eigenvalue,
    # so it can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
"""Test the linear separability of the first 2D KPCA transform"""
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
Yuliang-Zou/Automatic_Group_Photography_Enhancement | tools/demo.py | 1 | 5199 | import _init_paths
import tensorflow as tf
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect, im_detect_ori
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import os, sys, cv2
import argparse
from networks.factory import get_network
import ipdb
# (Yuliang) Background + voc(w/o person) + face
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'pottedplant', 'sheep',
'sofa', 'train', 'tvmonitor', 'face')
def vis_detections(im, class_name, dets, eyes, smiles, ax, thresh=0.9):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
eye = eyes[i, 1]
smile = smiles[i, 1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.text(bbox[0], bbox[3] + 2,
'Open eye score: {:.3f}'.format(eye),
bbox=dict(facecolor='green', alpha = 0.5),
fontsize=12, color='white')
ax.text(bbox[0], bbox[3] + 30,
'Smiling score: {:.3f}'.format(smile),
bbox=dict(facecolor='green', alpha = 0.5),
fontsize=12, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(sess, net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
#im_file = os.path.join('/home/corgi/Lab/label/pos_frame/ACCV/training/000001/',image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
# scores, boxes = im_detect(sess, net, im)
scores, boxes, eyes, smiles = im_detect_ori(sess, net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(8, 8))
ax.imshow(im, aspect='equal')
CONF_THRESH = 0.9
NMS_THRESH = 0.3
for cls_ind, cls in enumerate(CLASSES[20:]):
cls_ind += 20 # because we skipped everything except face
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
eye = eyes[keep, :]
smile= smiles[keep, :]
vis_detections(im, cls, dets, eye, smile, ax, thresh=CONF_THRESH)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
default='VGGnet_test')
parser.add_argument('--model', dest='model', help='Model path',
default=' ')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
if args.model == ' ':
raise IOError(('Error: Model not found.\n'))
# init session
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
# load network
net = get_network(args.demo_net)
# load model
saver = tf.train.Saver()
saver.restore(sess, args.model)
#sess.run(tf.initialize_all_variables())
print '\n\nLoaded network {:s}'.format(args.model)
# Warmup on a dummy image
im = 128 * np.ones((300, 300, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(sess, net, im)
im_names = ['img_46.jpg', 'img_208.jpg', 'img_269.jpg',
'img_339.jpg', 'img_726.jpg', 'img_843.jpg']
# im_names = ['f2_1.png', 'f2_2.png', 'f2_3.png', 'f2_4.png']
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
demo(sess, net, im_name)
plt.show()
| mit |
ulmo-dev/ulmo-common | setup.py | 3 | 2204 | import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
with open('README.rst') as f:
# use README for long description but don't include the link to travis-ci;
# it seems a bit out of place on pypi since it applies to the development
# version
long_description = ''.join([
line for line in f.readlines()
if 'travis-ci' not in line])
# this sets __version__
info = {}
execfile(os.path.join('ulmo', 'version.py'), info)
setup(
name='ulmo',
version=info['__version__'],
license='BSD',
author='Andy Wilson',
author_email='[email protected]',
description='clean, simple and fast access to public hydrology and climatology data',
long_description=long_description,
url='https://github.com/ulmo-dev/ulmo/',
keywords='his pyhis ulmo water waterml cuahsi wateroneflow',
packages=find_packages(),
platforms='any',
install_requires=[
'appdirs>=1.2.0',
'beautifulsoup4>=4.1.3',
'geojson',
'isodate>=0.4.6',
'lxml>=2.3',
# mock is required for mocking pytables-related functionality when it doesn't exist
'mock>=1.0.0',
'numpy>=1.4.0',
'pandas>=0.11',
'requests>=1.1',
'suds>=0.4',
],
extras_require={
'pytables_caching': ['tables>=2.3.0']
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
tests_require=[
'freezegun>=0.1.4',
'pytest>=2.3.2',
'httpretty>=0.5.8',
],
cmdclass={'test': PyTest},
)
| bsd-3-clause |
ellisonbg/altair | altair/utils/data.py | 1 | 4989 | import json
import random
import uuid
import pandas as pd
from toolz.curried import curry, pipe # noqa
from typing import Callable
from .core import sanitize_dataframe
from .plugin_registry import PluginRegistry
# ==============================================================================
# Data transformer registry
# ==============================================================================
DataTransformerType = Callable
class DataTransformerRegistry(PluginRegistry[DataTransformerType]):
pass
# ==============================================================================
# Data model transformers
#
# A data model transformer is a pure function that takes a dict or DataFrame
# and returns a transformed version of a dict or DataFrame. The dict objects
# will be the Data portion of the VegaLite schema. The idea is that the user can
# pipe a sequence of these data transformers together to prepare the data before
# it hits the renderer.
#
# In this version of Altair, renderers only deal with the dict form of a
# VegaLite spec, after the Data model has been put into a schema compliant
# form.
#
# A data model transformer has the following type signature:
# DataModelType = Union[dict, pd.DataFrame]
# DataModelTransformerType = Callable[[DataModelType, KwArgs], DataModelType]
# ==============================================================================
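# A minimal usage sketch of chaining the curried transformers defined below
# (the DataFrame here is a made-up example, not part of this module):
#
#     df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
#     pipe(df, limit_rows(max_rows=5000), sample(n=2), to_values)
#     # -> {'values': [{'x': ..., 'y': ...}, {'x': ..., 'y': ...}]}
#
# Each transformer is wrapped with @curry, so keyword arguments can be bound
# first and the data piped through afterwards.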
class MaxRowsError(Exception):
"""Raised when a data model has too many rows."""
pass
@curry
def limit_rows(data, max_rows=5000):
"""Raise MaxRowsError if the data model has more than max_rows.
If max_rows is None, then do not perform any check.
"""
check_data_type(data)
if isinstance(data, pd.DataFrame):
values = data
elif isinstance(data, dict):
if 'values' in data:
values = data['values']
else:
return data
if max_rows is not None and len(values) > max_rows:
raise MaxRowsError('The number of rows in your dataset is greater '
'than the maximum allowed ({0}). '
'For information on how to plot larger datasets '
'in Altair, see the documentation'.format(max_rows))
return data
@curry
def sample(data, n=None, frac=None):
"""Reduce the size of the data model by sampling without replacement."""
check_data_type(data)
if isinstance(data, pd.DataFrame):
return data.sample(n=n, frac=frac)
elif isinstance(data, dict):
if 'values' in data:
values = data['values']
n = n if n else int(frac*len(values))
values = random.sample(values, n)
return {'values': values}
@curry
def to_json(data, prefix='altair-data'):
"""Write the data model to a .json file and return a url based data model."""
check_data_type(data)
ext = '.json'
filename = _compute_filename(prefix=prefix, ext=ext)
if isinstance(data, pd.DataFrame):
data = sanitize_dataframe(data)
data.to_json(filename, orient='records')
elif isinstance(data, dict):
if 'values' not in data:
raise KeyError('values expected in data dict, but not present.')
values = data['values']
        with open(filename, 'w') as f:
json.dump(values, f)
return {
'url': filename,
'format': {'type': 'json'}
}
@curry
def to_csv(data, prefix='altair-data'):
"""Write the data model to a .csv file and return a url based data model."""
check_data_type(data)
ext = '.csv'
filename = _compute_filename(prefix=prefix, ext=ext)
if isinstance(data, pd.DataFrame):
data = sanitize_dataframe(data)
data.to_csv(filename)
return {
'url': filename,
'format': {'type': 'csv'}
}
elif isinstance(data, dict):
raise NotImplementedError('to_csv only works with Pandas DataFrame objects.')
@curry
def to_values(data):
"""Replace a DataFrame by a data model with values."""
check_data_type(data)
if isinstance(data, pd.DataFrame):
data = sanitize_dataframe(data)
return {'values': data.to_dict(orient='records')}
elif isinstance(data, dict):
if 'values' not in data:
raise KeyError('values expected in data dict, but not present.')
return data
def check_data_type(data):
"""Raise if the data is not a dict or DataFrame."""
if not isinstance(data, (dict, pd.DataFrame)):
raise TypeError('Expected dict or DataFrame, got: {}'.format(type(data)))
# ==============================================================================
# Private utilities
# ==============================================================================
def _compute_uuid_filename(prefix, ext):
return prefix + '-' + str(uuid.uuid4()) + ext
def _compute_filename(prefix='altair-data', ext='.csv'):
filename = _compute_uuid_filename(prefix, ext)
return filename
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/gaussian_process/tests/test_gpr.py | 23 | 11915 | """Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal)
def f(x):
return x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2)),
C(0.1, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2))]
def test_gpr_interpolation():
"""Test the interpolating property for different kernels."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_true(np.allclose(y_pred, y))
assert_true(np.allclose(np.diag(y_cov), 0.))
def test_lml_improving():
""" Test that hyperparameter-tuning improves log-marginal likelihood. """
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
""" Test that lml of optimized kernel is stored correctly. """
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_equal(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood())
def test_converged_to_local_maximum():
""" Test that we are in local maximum after hyperparameter-optimization."""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])))
def test_solution_inside_bounds():
""" Test that hyperparameter-optimization remains in bounds"""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
bounds = gpr.kernel_.bounds
max_ = np.finfo(gpr.kernel_.theta.dtype).max
tiny = 1e-10
bounds[~np.isfinite(bounds[:, 1]), 1] = max_
assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
def test_lml_gradient():
""" Compare analytic and numeric gradient of log marginal likelihood. """
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpr.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_prior():
""" Test that GP prior has mean 0 and identical variances."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel)
y_mean, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_mean, 0, 5)
if len(gpr.kernel.theta) > 1:
# XXX: quite hacky, works only for current kernels
assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
else:
assert_almost_equal(np.diag(y_cov), 1, 5)
def test_sample_statistics():
""" Test that statistics of samples drawn from GP are correct."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
samples = gpr.sample_y(X2, 300000)
# More digits accuracy would require many more samples
assert_almost_equal(y_mean, np.mean(samples, 1), 1)
assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
np.var(samples, 1) / np.diag(y_cov).max(), 1)
def test_no_optimizer():
""" Test that kernel parameters are unmodified when optimizer is None."""
kernel = RBF(1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
assert_equal(np.exp(gpr.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
""" Test that predicted std.-dev. is consistent with cov's diagonal."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
y_mean, y_std = gpr.predict(X2, return_std=True)
assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
""" Test that GPR can identify meaningful anisotropic length-scales. """
# We learn a function which varies in one dimension ten-times slower
# than in the other. The corresponding length-scales should differ by at
# least a factor 5
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (50, 2))
y = X[:, 0] + 0.1 * X[:, 1]
kernel = RBF([1.0, 1.0])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(np.exp(gpr.kernel_.theta[1]),
np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the log marginal likelihood of the chosen theta.
"""
n_samples, n_features = 25, 2
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
+ rng.normal(scale=0.1, size=n_samples)
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1.0] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessRegressor(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0,).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_y_normalization():
""" Test normalization of the target values in GP
Fitting non-normalizing GP on normalized y and fitting normalizing GP
on unnormalized y should yield identical results
"""
y_mean = y.mean(0)
y_norm = y - y_mean
for kernel in kernels:
# Fit non-normalizing GP on normalized y
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X, y_norm)
# Fit normalizing GP on unnormalized y
gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_norm.fit(X, y)
# Compare predicted mean, std-devs and covariances
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
y_pred = y_mean + y_pred
y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
assert_almost_equal(y_pred, y_pred_norm)
assert_almost_equal(y_pred_std, y_pred_std_norm)
_, y_cov = gpr.predict(X2, return_cov=True)
_, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
""" Test that GPR can deal with multi-dimensional target values"""
y_2d = np.vstack((y, y * 2)).T
# Test for fixed kernel that first dimension of 2d GP equals the output
# of 1d GP and that second dimension is twice as large
kernel = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr_2d.fit(X, y_2d)
y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
_, y_cov_1d = gpr.predict(X2, return_cov=True)
_, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
# Standard deviation and covariance do not depend on output
assert_almost_equal(y_std_1d, y_std_2d)
assert_almost_equal(y_cov_1d, y_cov_2d)
y_sample_1d = gpr.sample_y(X2, n_samples=10)
y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
# Test hyperparameter optimization
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_2d.fit(X, np.vstack((y, y)).T)
assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
""" Test that GPR can use externally defined optimizers. """
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
gpr.fit(X, y)
# Checks that optimizer improved marginal likelihood
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_duplicate_input():
""" Test GPR can handle two different output-values for the same input. """
for kernel in kernels:
gpr_equal_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
gpr_similar_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
X_ = np.vstack((X, X[0]))
y_ = np.hstack((y, y[0] + 1))
gpr_equal_inputs.fit(X_, y_)
X_ = np.vstack((X, X[0] + 1e-15))
y_ = np.hstack((y, y[0] + 1))
gpr_similar_inputs.fit(X_, y_)
X_test = np.linspace(0, 10, 100)[:, None]
y_pred_equal, y_std_equal = \
gpr_equal_inputs.predict(X_test, return_std=True)
y_pred_similar, y_std_similar = \
gpr_similar_inputs.predict(X_test, return_std=True)
assert_almost_equal(y_pred_equal, y_pred_similar)
assert_almost_equal(y_std_equal, y_std_similar)
| unlicense |
cgriffwx/programingworkshop | Python/pandas_and_parallel/meso_surface.py | 8 | 2237 | import pandas as pd
import os
import datetime as dt
import numpy as np
import mesonet_calculations
from plotting import sfc_plot
''' Read in mesonet data for a given time interval and create surface plots,
saved under the current working directory in one folder per plotted
variable.
'''
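# Expected inputs, as used below: a space-delimited 'locations.txt' station
# list and a 'raw_data/' directory of mesonet .txt files, both relative to
# the current working directory.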
variables = {'temperature' : ('Degrees Celsius', '2 m Temperature'),
'pressure': ('Millibars', 'Sea Level Pressure'),
'dew_point': ('Degrees Celsius', 'Dewpoint'),
'wind_speed': ('Meters per second', '10 m Scalar Wind Speed'),
'gust_speed': ('Meters per second', '10 m Gust Wind Speed'),
'rainfall': ('Inches', 'Rainfall'),
}
# to_plot = 'temperature'
to_plot = np.array(['temperature', 'pressure', 'dew_point', 'wind_speed',
'gust_speed', 'rainfall'])
# Note that the start time should be divisble by 5 minutes
starttime = dt.datetime(2012, 6, 15, 1, 0)
endtime = dt.datetime(2012, 6, 15, 2, 0)
filename = 'locations.txt'
locations = pd.read_csv(filename, sep=' ')
met = pd.DataFrame()
dir = os.listdir('raw_data')
for file in dir:
if file[-4:] == '.txt':
met = pd.concat([met,
mesonet_calculations.meso_operations('raw_data/%s' %(file),
starttime,endtime,locations)], axis=0)
xmin = np.min(met['Lon'])
xmax = np.max(met['Lon'])
ymin = np.min(met['Lat'])
ymax = np.max(met['Lat'])
xi, yi = np.meshgrid(np.linspace(xmin, xmax, 200),
np.linspace(ymin, ymax, 200))
for var in to_plot:
    sfc_plot(starttime, endtime, var, variables[var],
             locations, met, xi, yi, xmin, xmax, ymin, ymax)
| mit |
fabianp/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
| bsd-3-clause |
loli/semisupervisedforests | sklearn/cluster/tests/test_hierarchical.py | 5 | 18965 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import Ward, WardAgglomeration, ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# Deprecation of Ward class
assert_warns(DeprecationWarning, Ward).fit(X)
    # test hierarchical clustering on a precomputed distance matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity (manhattan distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
"""
Check that we obtain the correct solution for structured linkage trees.
"""
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
"""
Check that we obtain the correct solution for unstructured linkage trees.
"""
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
        # With a specified number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
"""
Check that the height of the results of linkage tree is sorted.
"""
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
"""
Check that we obtain the correct number of clusters with
agglomerative clustering.
"""
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
"""
Check that we obtain the correct solution in a simplistic case
"""
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
assert_warns(DeprecationWarning, WardAgglomeration)
with ignore_warnings():
ward = WardAgglomeration(n_clusters=5, connectivity=connectivity)
ward.fit(X)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_array_equal(agglo.labels_, ward.labels_)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
"""Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
"""
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
"""
Check that connectivity in the ward tree is propagated correctly during
merging.
"""
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144),
])
connectivity = kneighbors_graph(X, 10)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
"""
Check that children are ordered in the same way for both structured and
unstructured versions of ward_tree.
"""
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
"""Test return_distance option on linkage and ward trees"""
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
"""
    Check non-regression of a bug when a connectivity matrix that does not
    support item assignment is provided with more than one connected component.
"""
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
"""Test that the full tree is computed if n_clusters is small"""
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large, greater than max of 100 and 0.02 * n_samples,
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
btabibian/scikit-learn | examples/bicluster/plot_bicluster_newsgroups.py | 14 | 5895 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
"""
from __future__ import print_function
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
print(__doc__)
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
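# For example (illustrative): number_aware_tokenizer("10 cats 4ever") returns
# ['#NUMBER', 'cats', '#NUMBER'], because any token whose first character is a
# digit or underscore is mapped to the placeholder.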
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis],
# cols].sum() but much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/neighbors/tests/test_dist_metrics.py | 36 | 6957 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.neighbors import BallTree
from sklearn.utils.testing import SkipTest, assert_raises_regex
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_pickle(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
yield self.check_pickle, metric, kwargs
for metric in self.bool_metrics:
yield self.check_pickle_bool, metric
def check_pickle_bool(self, metric):
dm = DistanceMetric.get_metric(metric)
D1 = dm.pairwise(self.X1_bool)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1_bool)
assert_array_almost_equal(D1, D2)
def check_pickle(self, metric, kwargs):
dm = DistanceMetric.get_metric(metric, **kwargs)
D1 = dm.pairwise(self.X1)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1)
assert_array_almost_equal(D1, D2)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
def test_bad_pyfunc_metric():
def wrong_distance(x, y):
return "1"
X = np.ones((5, 2))
assert_raises_regex(TypeError,
"Custom distance function must accept two vectors",
BallTree, X, metric=wrong_distance)
def test_input_data_size():
# Regression test for #6288
    # Previously, a metric requiring a particular input dimension would fail
def custom_metric(x, y):
assert x.shape[0] == 3
return np.sum((x - y) ** 2)
rng = np.random.RandomState(0)
X = rng.rand(10, 3)
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
eucl = DistanceMetric.get_metric("euclidean")
assert_array_almost_equal(pyfunc.pairwise(X), eucl.pairwise(X))
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/IPython/external/qt_for_kernel.py | 12 | 3192 | """ Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask QT_API env variable
if QT_API not set:
ask matplotlib what it's using. If Qt4Agg or Qt5Agg, then use the
version matplotlib is configured with
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try in this order:
PyQt default version, PySide, PyQt5
else:
use what QT_API says
"""
# NOTE: This is no longer an external, third-party module, and should be
# considered part of IPython. For compatibility however, it is being kept in
# IPython/external.
import os
import sys
from IPython.utils.version import check_version
from IPython.external.qt_loaders import (load_qt, loaded_api, QT_API_PYSIDE,
QT_API_PYSIDE2, QT_API_PYQT, QT_API_PYQT5,
QT_API_PYQTv1, QT_API_PYQT_DEFAULT)
_qt_apis = (QT_API_PYSIDE, QT_API_PYSIDE2, QT_API_PYQT, QT_API_PYQT5, QT_API_PYQTv1,
QT_API_PYQT_DEFAULT)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
if mpl is None:
return
backend = mpl.rcParams.get('backend', None)
if backend == 'Qt4Agg':
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
elif mpqt.lower() == 'pyqt4v2':
return [QT_API_PYQT]
raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
mpqt)
elif backend == 'Qt5Agg':
mpqt = mpl.rcParams.get('backend.qt5', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyqt5':
return [QT_API_PYQT5]
raise ImportError("unhandled value for backend.qt5 from matplotlib: %r" %
mpqt)
def get_options():
"""Return a list of acceptable QT APIs, in decreasing order of
preference
"""
#already imported Qt somewhere. Use that
loaded = loaded_api()
if loaded is not None:
return [loaded]
mpl = sys.modules.get('matplotlib', None)
if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
#1.0.1 only supports PyQt4 v1
return [QT_API_PYQT_DEFAULT]
qt_api = os.environ.get('QT_API', None)
if qt_api is None:
#no ETS variable. Ask mpl, then use default fallback path
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE,
QT_API_PYQT5, QT_API_PYSIDE2]
elif qt_api not in _qt_apis:
raise RuntimeError("Invalid Qt API %r, valid values are: %r" %
(qt_api, ', '.join(_qt_apis)))
else:
return [qt_api]
api_opts = get_options()
QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
| gpl-3.0 |
devttys0/binwalk | src/binwalk/modules/entropy.py | 1 | 11907 | # Calculates and optionally plots the entropy of input files.
import os
import sys
import math
import zlib
import binwalk.core.common
from binwalk.core.compat import *
from binwalk.core.module import Module, Option, Kwarg
try:
    import numpy as np
except ImportError:
    pass
try:
from numba import njit
except ImportError:
def njit(func):
return func
class Entropy(Module):
XLABEL = 'Offset'
YLABEL = 'Entropy'
XUNITS = 'B'
YUNITS = 'E'
FILE_WIDTH = 1024
FILE_FORMAT = 'png'
COLORS = ['g', 'r', 'c', 'm', 'y']
DEFAULT_BLOCK_SIZE = 1024
DEFAULT_DATA_POINTS = 2048
DEFAULT_TRIGGER_HIGH = .95
DEFAULT_TRIGGER_LOW = .85
TITLE = "Entropy"
ORDER = 8
# TODO: Add --dpoints option to set the number of data points?
CLI = [
Option(short='E',
long='entropy',
kwargs={'enabled': True},
description='Calculate file entropy'),
Option(short='F',
long='fast',
kwargs={'use_zlib': True},
description='Use faster, but less detailed, entropy analysis'),
Option(short='J',
long='save',
kwargs={'save_plot': True},
description='Save plot as a PNG'),
Option(short='Q',
long='nlegend',
kwargs={'show_legend': False},
description='Omit the legend from the entropy plot graph'),
Option(short='N',
long='nplot',
kwargs={'do_plot': False},
description='Do not generate an entropy plot graph'),
Option(short='H',
long='high',
type=float,
kwargs={'trigger_high': DEFAULT_TRIGGER_HIGH},
description='Set the rising edge entropy trigger threshold (default: %.2f)' % DEFAULT_TRIGGER_HIGH),
Option(short='L',
long='low',
type=float,
kwargs={'trigger_low': DEFAULT_TRIGGER_LOW},
description='Set the falling edge entropy trigger threshold (default: %.2f)' % DEFAULT_TRIGGER_LOW),
]
KWARGS = [
Kwarg(name='enabled', default=False),
Kwarg(name='save_plot', default=False),
Kwarg(name='trigger_high', default=DEFAULT_TRIGGER_HIGH),
Kwarg(name='trigger_low', default=DEFAULT_TRIGGER_LOW),
Kwarg(name='use_zlib', default=False),
Kwarg(name='display_results', default=True),
Kwarg(name='do_plot', default=True),
Kwarg(name='show_legend', default=True),
Kwarg(name='block_size', default=0),
]
# Run this module last so that it can process all other module's results
# and overlay them on the entropy graph
PRIORITY = 0
def init(self):
self.HEADER[-1] = "ENTROPY"
self.max_description_length = 0
self.file_markers = {}
self.output_file = None
if self.use_zlib:
self.algorithm = self.gzip
else:
if 'numpy' in sys.modules:
self.algorithm = self.shannon_numpy
else:
self.algorithm = self.shannon
# Get a list of all other module's results to mark on the entropy graph
for (module, obj) in iterator(self.modules):
for result in obj.results:
if result.plot and result.file and result.description:
description = result.description.split(',')[0]
if not has_key(self.file_markers, result.file.name):
self.file_markers[result.file.name] = []
if len(description) > self.max_description_length:
self.max_description_length = len(description)
self.file_markers[result.file.name].append((result.offset, description))
# If other modules have been run and they produced results, don't spam
# the terminal with entropy results
if self.file_markers:
self.display_results = False
if not self.block_size:
if self.config.block:
self.block_size = self.config.block
else:
self.block_size = None
def _entropy_sigterm_handler(self, *args):
print ("Fuck it all.")
def run(self):
self._run()
def _run(self):
# Sanity check and warning if matplotlib isn't found
if self.do_plot:
try:
# If we're saving the plot to a file, configure matplotlib
                # to use the Agg back-end. This does not require an X server,
# allowing users to generate plot files on headless systems.
if self.save_plot:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
except ImportError as e:
binwalk.core.common.warning("Failed to import matplotlib module, visual entropy graphing will be disabled")
self.do_plot = False
for fp in iter(self.next_file, None):
if self.display_results:
self.header()
self.calculate_file_entropy(fp)
if self.display_results:
self.footer()
def calculate_file_entropy(self, fp):
# Tracks the last displayed rising/falling edge (0 for falling, 1 for
# rising, None if nothing has been printed yet)
last_edge = None
# Auto-reset the trigger; if True, an entropy above/below
# self.trigger_high/self.trigger_low will be printed
trigger_reset = True
# Clear results from any previously analyzed files
self.clear(results=True)
# If -K was not specified, calculate the block size to create
# DEFAULT_DATA_POINTS data points
if self.block_size is None:
block_size = fp.size / self.DEFAULT_DATA_POINTS
# Round up to the nearest DEFAULT_BLOCK_SIZE (1024)
block_size = int(block_size + ((self.DEFAULT_BLOCK_SIZE - block_size) % self.DEFAULT_BLOCK_SIZE))
else:
block_size = self.block_size
# Make sure block size is greater than 0
if block_size <= 0:
block_size = self.DEFAULT_BLOCK_SIZE
binwalk.core.common.debug("Entropy block size (%d data points): %d" %
(self.DEFAULT_DATA_POINTS, block_size))
while True:
file_offset = fp.tell()
(data, dlen) = fp.read_block()
if dlen < 1:
break
i = 0
while i < dlen:
entropy = self.algorithm(data[i:i + block_size])
display = self.display_results
description = "%f" % entropy
if not self.config.verbose:
if last_edge in [None, 0] and entropy > self.trigger_low:
trigger_reset = True
elif last_edge in [None, 1] and entropy < self.trigger_high:
trigger_reset = True
if trigger_reset and entropy >= self.trigger_high:
description = "Rising entropy edge (%f)" % entropy
display = self.display_results
last_edge = 1
trigger_reset = False
elif trigger_reset and entropy <= self.trigger_low:
description = "Falling entropy edge (%f)" % entropy
display = self.display_results
last_edge = 0
trigger_reset = False
else:
display = False
description = "%f" % entropy
r = self.result(offset=(file_offset + i),
file=fp,
entropy=entropy,
description=description,
display=display)
i += block_size
if self.do_plot:
self.plot_entropy(fp.name)
def shannon(self, data):
'''
Performs a Shannon entropy analysis on a given block of data.
'''
entropy = 0
if data:
length = len(data)
seen = dict(((chr(x), 0) for x in range(0, 256)))
for byte in data:
seen[byte] += 1
for x in range(0, 256):
p_x = float(seen[chr(x)]) / length
if p_x > 0:
entropy -= p_x * math.log(p_x, 2)
return (entropy / 8)
def shannon_numpy(self, data):
if data:
return self._shannon_numpy(bytes2str(data))
else:
return 0
@staticmethod
@njit
def _shannon_numpy(data):
A = np.frombuffer(data, dtype=np.uint8)
pA = np.bincount(A) / len(A)
entropy = -np.nansum(pA*np.log2(pA))
return (entropy / 8)
def gzip(self, data, truncate=True):
'''
Performs an entropy analysis based on zlib compression ratio.
This is faster than the shannon entropy analysis, but not as accurate.
'''
# Entropy is a simple ratio of: <zlib compressed size> / <original
# size>
e = float(float(len(zlib.compress(str2bytes(data), 9))) / float(len(data)))
if truncate and e > 1.0:
e = 1.0
return e
def plot_entropy(self, fname):
try:
import matplotlib.pyplot as plt
except ImportError as e:
return
i = 0
x = []
y = []
plotted_colors = {}
for r in self.results:
x.append(r.offset)
y.append(r.entropy)
fig = plt.figure()
        # axisbg is deprecated, but older versions of matplotlib don't support facecolor.
        # This tries facecolor first, thus preventing the annoying deprecation warnings,
        # and falls back to axisbg if that fails.
try:
ax = fig.add_subplot(1, 1, 1, autoscale_on=True, facecolor='black')
except AttributeError:
ax = fig.add_subplot(1, 1, 1, autoscale_on=True, axisbg='black')
ax.set_title(self.TITLE)
ax.set_xlabel(self.XLABEL)
ax.set_ylabel(self.YLABEL)
ax.plot(x, y, 'y', lw=2)
# Add a fake, invisible plot entry so that offsets at/near the
# minimum x value (0) are actually visible on the plot.
ax.plot(-(max(x)*.001), 1.1, lw=0)
ax.plot(-(max(x)*.001), 0, lw=0)
if self.show_legend and has_key(self.file_markers, fname):
for (offset, description) in self.file_markers[fname]:
# If this description has already been plotted at a different offset, we need to
# use the same color for the marker, but set the description to None to prevent
# duplicate entries in the graph legend.
#
# Else, get the next color and use it to mark descriptions of
# this type.
if has_key(plotted_colors, description):
color = plotted_colors[description]
description = None
else:
color = self.COLORS[i]
plotted_colors[description] = color
i += 1
if i >= len(self.COLORS):
i = 0
ax.plot([offset, offset], [0, 1.1], '%s-' % color, lw=2, label=description)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
if self.save_plot:
self.output_file = os.path.join(os.getcwd(), os.path.basename(fname)) + '.png'
fig.savefig(self.output_file, bbox_inches='tight')
else:
plt.show()
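# Illustrative sketch (not part of upstream binwalk): a standalone version of
# the byte-frequency Shannon estimate used by Entropy.shannon() above, written
# as a free function so the scaling can be tried without the binwalk Module
# framework. The helper name and the sample inputs are assumptions made for
# demonstration only.
def _shannon_sketch(data):
    '''
    Shannon entropy of a block of bytes, scaled to [0, 1] like the method above.
    '''
    entropy = 0.0
    if data:
        length = len(data)
        counts = {}
        for byte in data:
            counts[byte] = counts.get(byte, 0) + 1
        for count in counts.values():
            p_x = float(count) / length
            entropy -= p_x * math.log(p_x, 2)
    return entropy / 8
# Example: _shannon_sketch("\x00" * 64) gives 0.0 (a single repeated byte),
# while a block containing 64 distinct byte values once each gives
# log2(64) / 8 = 0.75.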
| mit |
kursawe/MCSTracker | test/test_tracking/test_visualise_search.py | 1 | 3905 | # Copyright 2016 Jochen Kursawe. See the LICENSE file at the top-level directory
# of this distribution and at https://github.com/kursawe/MCSTracker/blob/master/LICENSE.
"""This tests our first tracking example
"""
import unittest
import mesh
import tracking
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import os
import copy
import sys
from os import path
from os.path import dirname
from nose.plugins.attrib import attr
class TestTracking(unittest.TestCase):
@attr(level = 'standard')
def test_visualise_search(self):
"""generate a small random mesh and track it.
"""
sys.setrecursionlimit(40000)
mesh_one = mesh.creation.generate_random_tesselation(5,5)
mesh_two = copy.deepcopy(mesh_one)
mesh_one.assign_frame_ids_in_order()
mesh_two.assign_frame_ids_randomly()
tracked_ids = tracking.track(mesh_one, mesh_two)
mesh_two.plot(path.join(dirname(__file__),'output','visualisation_mesh_after_tracking.pdf'), color_by_global_id = True,
total_number_of_global_ids = mesh_one.get_num_elements())
@attr(level = 'standard')
def test_plot_collection_into_separate_figure(self):
"""plot a polygon collection into a figure designed by us
"""
sys.setrecursionlimit(40000)
mesh_one = mesh.creation.generate_random_tesselation(5,5)
print 'start to copy'
mesh_two = copy.deepcopy(mesh_one)
print 'stop to copy'
mesh_one.assign_frame_ids_in_order()
mesh_two.assign_frame_ids_randomly()
tracked_ids = tracking.track(mesh_one, mesh_two)
# figure properties
figuresize = (4,2.75)
font = {'size' : 10}
plt.rc('font', **font)
mesh_figure = plt.figure(figsize = figuresize)
ax = plt.axes([0,0,1,1])
polygon_collection = mesh_two.get_polygon_collection(color_by_global_id = True,
total_number_of_global_ids = mesh_one.get_num_elements())
mesh_figure.gca().add_collection(polygon_collection)
mesh_figure.gca().set_aspect('equal')
mesh_figure.gca().autoscale_view()
plt.axis('off')
filename = path.join(dirname(__file__),'output','own_visualisation_figure.pdf')
mesh_figure.savefig(filename, bbox_inches = 'tight')
def test_plot_all_global_ids_same_color(self):
"""plot all global ids same color
"""
sys.setrecursionlimit(40000)
mesh_one = mesh.creation.generate_random_tesselation(8,8)
mesh_two = copy.deepcopy(mesh_one)
mesh_one.assign_frame_ids_in_order()
mesh_two.assign_frame_ids_randomly()
# Perform T1 swap on mesh two
# First pick a node in the centre
mesh_centre = mesh_two.calculate_centre()
# pick the node closest to the centre
most_central_node = mesh_two.find_most_central_node()
# pick a node that shares an edge with this central node
for local_index, element_node in enumerate(most_central_node.adjacent_elements[0].nodes):
if element_node.id == most_central_node.id:
num_nodes_this_element = most_central_node.adjacent_elements[0].get_num_nodes()
one_edge_node = most_central_node.adjacent_elements[0].nodes[(local_index+1)%num_nodes_this_element]
break
mesh_two.perform_t1_swap( most_central_node.id, one_edge_node.id )
tracked_ids = tracking.find_maximum_common_subgraph(mesh_one, mesh_two)
mesh_two.plot(path.join(dirname(__file__),'output','visualisation_in_green.pdf'), color_by_global_id = 'g',
total_number_of_global_ids = mesh_one.get_num_elements()) | bsd-3-clause |
MRSDTeamI/bud-e | ROS/src/rgbdslam_v2-hydro/rgbd_benchmark/evaluate_ate.py | 4 | 7523 | #!/usr/bin/python
#
# Requirements:
# sudo apt-get install python-argparse
import sys
import numpy
import argparse
import associate
def align_first(model,data):
numpy.set_printoptions(precision=3,suppress=True)
model_zerocentered = model - model.mean(1)
data_zerocentered = data - data.mean(1)
W = numpy.zeros( (3,3) )
for column in range(model.shape[1]):
W += numpy.outer(model_zerocentered[:,column],data_zerocentered[:,column])
U,d,Vh = numpy.linalg.linalg.svd(W.transpose())
S = numpy.matrix(numpy.identity( 3 ))
if(numpy.linalg.det(U) * numpy.linalg.det(Vh)<0):
S[2,2] = -1
rot = U*S*Vh
print data.shape
trans = data[:,0] - model[:,0]
model_aligned = rot * model + trans
alignment_error = model_aligned - data
trans_error = numpy.sqrt(numpy.sum(numpy.multiply(alignment_error,alignment_error),0)).A[0]
return rot,trans,trans_error
def align(model,data):
numpy.set_printoptions(precision=3,suppress=True)
model_zerocentered = model - model.mean(1)
data_zerocentered = data - data.mean(1)
W = numpy.zeros( (3,3) )
for column in range(model.shape[1]):
W += numpy.outer(model_zerocentered[:,column],data_zerocentered[:,column])
U,d,Vh = numpy.linalg.linalg.svd(W.transpose())
S = numpy.matrix(numpy.identity( 3 ))
if(numpy.linalg.det(U) * numpy.linalg.det(Vh)<0):
S[2,2] = -1
rot = U*S*Vh
trans = data.mean(1) - rot * model.mean(1)
model_aligned = rot * model + trans
alignment_error = model_aligned - data
trans_error = numpy.sqrt(numpy.sum(numpy.multiply(alignment_error,alignment_error),0)).A[0]
return rot,trans,trans_error
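# Self-check sketch (added for illustration, not part of the original
# benchmark script): align() should recover a known rigid transform exactly
# when the "estimated" trajectory is a rotated and translated copy of the
# ground truth. The function below is hypothetical and is not called anywhere;
# it can be invoked by hand to verify the SVD-based alignment.
def _align_self_test():
    rng = numpy.random.RandomState(0)
    model = numpy.matrix(rng.rand(3, 20))
    angle = 0.3
    rot_true = numpy.matrix([[numpy.cos(angle), -numpy.sin(angle), 0.0],
                             [numpy.sin(angle), numpy.cos(angle), 0.0],
                             [0.0, 0.0, 1.0]])
    trans_true = numpy.matrix([[0.5], [-0.2], [1.0]])
    data = rot_true * model + trans_true
    rot, trans, trans_error = align(model, data)
    assert numpy.allclose(rot, rot_true)
    assert numpy.max(trans_error) < 1e-9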
def plot_traj(ax,stamps,traj,style,color,label, linewidth):
stamps.sort()
interval = numpy.median([s-t for s,t in zip(stamps[1:],stamps[:-1])])
x = []
y = []
last = stamps[0]
for i in range(len(stamps)):
if stamps[i]-last < 5*interval:
x.append(traj[i][0])
y.append(traj[i][1])
elif len(x)>0:
ax.plot(x,y,style,color=color,label=label, linewidth=linewidth)
label=""
x=[]
y=[]
last= stamps[i]
if len(x)>0:
ax.plot(x,y,style,color=color,label=label, linewidth=linewidth)
if __name__=="__main__":
# parse command line
parser = argparse.ArgumentParser(description='''
This script computes the absolute trajectory error from the ground truth trajectory and the estimated trajectory.
''')
parser.add_argument('first_file', help='ground truth trajectory (format: timestamp tx ty tz qx qy qz qw)')
parser.add_argument('second_file', help='estimated trajectory (format: timestamp tx ty tz qx qy qz qw)')
parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)',default=0.0)
parser.add_argument('--scale', help='scaling factor for the second trajectory (default: 1.0)',default=1.0)
parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)',default=0.02)
parser.add_argument('--save', help='save aligned second trajectory to disk (format: stamp2 x2 y2 z2)')
parser.add_argument('--save_associations', help='save associated first and aligned second trajectory to disk (format: stamp1 x1 y1 z1 stamp2 x2 y2 z2)')
parser.add_argument('--plot', help='plot the first and the aligned second trajectory to an image (format: png)')
parser.add_argument('--verbose', help='print all evaluation data (otherwise, only the RMSE absolute translational error in meters after alignment will be printed)', action='store_true')
args = parser.parse_args()
first_list = associate.read_file_list(args.first_file)
second_list = associate.read_file_list(args.second_file)
matches = associate.associate(first_list, second_list,float(args.offset),float(args.max_difference))
if len(matches)<2:
sys.exit("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory! Did you choose the correct sequence?")
first_xyz = numpy.matrix([[float(value) for value in first_list[a][0:3]] for a,b in matches]).transpose()
second_xyz = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for a,b in matches]).transpose()
rot,trans,trans_error = align(second_xyz,first_xyz)
#rot,trans,trans_error = align_first(second_xyz,first_xyz)
second_xyz_aligned = rot * second_xyz + trans
first_stamps = first_list.keys()
first_stamps.sort()
first_xyz_full = numpy.matrix([[float(value) for value in first_list[b][0:3]] for b in first_stamps]).transpose()
second_stamps = second_list.keys()
second_stamps.sort()
second_xyz_full = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for b in second_stamps]).transpose()
second_xyz_full_aligned = rot * second_xyz_full + trans
z_coords = numpy.array(second_xyz_full_aligned[2])
rmse = numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
if args.verbose:
print "Z Min: ", numpy.min(z_coords);
print "Z Max: ", numpy.max(z_coords);
print "Z Std: ", numpy.std(z_coords);
print "RMSE: ", numpy.sqrt(numpy.sum(z_coords*z_coords) / z_coords.shape[1])
print "compared_pose_pairs %d pairs"%(len(trans_error))
print "absolute_translational_error.rmse %f m"%rmse
print "absolute_translational_error.mean %f m"%numpy.mean(trans_error)
print "absolute_translational_error.median %f m"%numpy.median(trans_error)
print "absolute_translational_error.std %f m"%numpy.std(trans_error)
print "absolute_translational_error.min %f m"%numpy.min(trans_error)
print "absolute_translational_error.max %f m"%numpy.max(trans_error)
else:
print "%f"%rmse
if args.save_associations:
file = open(args.save_associations,"w")
file.write("\n".join(["%f %f %f %f %f %f %f %f"%(a,x1,y1,z1,b,x2,y2,z2) for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A)]))
file.close()
if args.save:
file = open(args.save,"w")
file.write("\n".join(["%f "%stamp+" ".join(["%f"%d for d in line]) for stamp,line in zip(second_stamps,second_xyz_full_aligned.transpose().A)]))
file.close()
if args.plot:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.patches import Ellipse
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
plt.title("ATE RMSE: "+str(rmse))
plot_traj(ax,first_stamps,first_xyz_full.transpose().A,'-',"black","Ground Truth", linewidth=2)
plot_traj(ax,second_stamps,second_xyz_full_aligned.transpose().A,'-',"green","Unmodified RGB-D Sensor", linewidth=2)
label="Difference"
i = 0
for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A):
i+=1
if i % 10 == 0:
ax.plot([x1,x2],[y1,y2],'-',color="red",label=label, linewidth=1)
label=""
ax.legend(loc="upper left")
ax.set_xlabel('x [m]')
ax.set_ylabel('y [m]')
plt.savefig(args.plot,dpi=300)
| apache-2.0 |
dominikwille/comp | sheet13/sheet13.py | 1 | 4782 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
#
# @author Dominik Wille
# @author Stefan Pojtinger
# @tutor Alexander Schlaich
# @sheet 13
#
#Read in packages:
import numpy as np
import os
import csv
import matplotlib.pyplot as plt
import random as rnd
#Exercise 13.1.1
#For the integral from 0 to delta, x << 1 holds, so 1 + x can be
#approximated by e**x via its Taylor expansion.
#The integral is therefore 2*sqrt(delta).
def f(x):
return 1.0/np.sqrt(np.log(1+x))
def hit_or_miss(f, a, b, N, f_min = None, f_max = None):
if(f_min is None):
f_min = f(b)
if(f_max is None):
f_max = f(a)
N_pos = 0
for i in range(N):
r = rnd.random()
s = rnd.random()
if(f(a + (b - a) * r) - f_min > (f_max - f_min) * s):
N_pos += 1
return (b - a) * (float(N_pos) / float(N) * (f_max - f_min) + f_min)
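# Quick illustration (not part of the original exercise sheet): hit_or_miss
# applied to a function with a known integral. The test function, sample
# count and tolerance handling below are assumptions chosen only to
# demonstrate the estimator; call check_hit_or_miss() by hand to try it.
def check_hit_or_miss(N=10000):
    # integral of x**2 over [0, 1] is 1/3
    I_est = hit_or_miss(lambda x: x**2, 0.0, 1.0, N, f_min=0.0, f_max=1.0)
    return abs(I_est - 1.0/3.0)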
# delta = 10e-3
# a = delta
# b = 1
# N = 1000
# I = []
# for i in range(10):
# I.append(hit_or_miss(f, a, b, N) + 2*np.sqrt(delta))
# print np.average(I)
# print np.std(I)
#Using 100 times more steps makes the standard deviation
#about 10 times smaller.
#Exercise 13.1.2
#The limit of f(x)/g(x) for x->0 is 1,
#because 1 + x can again be approximated by e**x.
def f2(y):
    return 1.0/np.sqrt(np.log(1+y**2/4.0))*y/2
a = 0
b = 2
N = 100000
I = []
# for i in range(10):
# I.append(hit_or_miss(f2, a, b, N, f_max = 1))
# print np.average(I)
# print np.std(I)
#The standard deviation is at least one order of magnitude
#(10 times) smaller than in 13.1.1. It is also notable
#that the integral takes a noticeably different value.
#possibly a faulty implementation???
#Exercise 13.2
def rand_m(N):
X = np.matrix([[0]*N]*N)
for i in range(N):
for j in range(N):
X[i,j] = rnd.choice([-3,-1,1,3])
return X
def repeat(x, N):
while x < 0:
x += N
while x >= N:
x -= N
return x
def flip(x):
if(x == -3):
return -1
elif(x == 3):
return 1
else:
return x + rnd.choice([-2, 2])
def getdE(X, (y1, y2), n_element, H, J):
dE = 0
dE += - H * n_element / 2.0
dE -= - H * X[y1, y2] / 2.0
if(y1 > 0):
dE += - J * X[y1 - 1, y2] * n_element / 8.0
dE -= - J * X[y1 - 1, y2] * X[y1, y2] / 8.0
if(y1 < N - 1):
dE += - J * X[y1 + 1, y2] * n_element / 8.0
dE -= - J * X[y1 + 1, y2] * X[y1, y2] / 8.0
if(y2 > 0):
dE += - J * X[y1, y2 - 1] * n_element / 8.0
dE -= - J * X[y1, y2 - 1] * X[y1, y2] / 8.0
if(y2 < N - 1):
dE += - J * X[y1, y2 + 1] * n_element / 8.0
dE -= - J * X[y1, y2 + 1] * X[y1, y2] / 8.0
return dE
def metropolis((x1, x2), X, kT, H, J=1.0, r=1.0, steps=10):
N = len(X)
for i in range(steps):
(q1, q2) = (int(round(rnd.random() * 2 * r - r)), int(round(rnd.random() * 2 * r - r)))
(y1, y2) = (repeat(x1 + q1, N), repeat(x2 + q2, N))
n_element = flip(X[y1,y2])
dE = getdE(X, (y1, y2), n_element, H, J)
dM = n_element - X[y1, y2]
if(dE < 0):
X[y1, y2] = n_element
(x1, x2) = (y1, y2)
else:
p_A = min(1.0, np.exp(-dE/kT))
if(rnd.random() < p_A):
X[y1, y2] = n_element
(x1, x2) = (y1, y2)
return X
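# Illustrative expectation (added as a comment, not part of the submitted
# sheet): at very small kT and a strong positive field H the sweep above
# should drive every spin towards +3, so e.g. (hypothetical parameters)
#   X_demo = metropolis((0, 0), rand_m(4), kT=1e-3, H=10.0, steps=2000)
# should end up with getM(X_demo) close to 4 * 4 * 3 / 2.0 = 24.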
def getM(X):
N = len(X)
M = 0
for i in range(N):
for j in range(N):
M += X[i,j]
return M / 2.0
def getE(X, H):
# print H
N = len(X)
E = 0
for i in range(N):
for j in range(N):
E += -H * X[i,j] / 2.0
if(i > 0):
E += -X[i - 1, j] * X[i,j] / 8.0
if(i < N - 1):
E += -X[i + 1, j] * X[i,j] / 8.0
if(j > 0):
E += -X[i, j - 1] * X[i,j] / 8.0
if(j < N - 1):
E += -X[i, j + 1] * X[i,j] / 8.0
return E
N = 10
kT_list = [1e-3, 1e-1, 1.0, 2.0, 5.0, 10.0, 100.0]
H_list = [0, 0.1, 1.0]
# X = rand_m(N)
# kT = kT_list[6]
# H = H_list[2]
# (X, E, M) = metropolis((4, 4), X, kT, H, steps=25000)
# plt.imshow(X, extent=(0, N, N, 0), interpolation='nearest', cmap=plt.cm.jet)
# plt.title('$H = ' + str(H) + '\,\,\,;\,\,\, kT = ' + str(kT) + '$')
# plt.show()
E = []
M = []
E_ = 0
M_ = 0
H = H_list[2]
times = 10
for kT in kT_list:
for n in range(times):
X = rand_m(N)
X = metropolis((4, 4), X, kT, H, steps=25000)
print str(kT) + ', ' + str(H) + ', ' + str(getE(X, H)) + ', ' + str(getM(X))
M_ += getM(X) / times
E_ += getE(X, H) / times
M.append(M_)
E.append(E_)
plt.plot(kT_list, E)
plt.plot(kT_list, M)
plt.legend(['E', 'M'])
plt.xscale('log')
plt.title('$H = ' + str(H) + '$')
plt.xlabel('$kT$')
plt.ylabel('$M/E$')
plt.show()
| bsd-2-clause |
gewaltig/cython-neuron | testsuite/manualtests/cross_check_test_mip_corrdet.py | 2 | 2594 | #! /usr/bin/env python
#
# cross_check_test_mip_corrdet.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Script to check correlation_detector.
# Calculates spike cross correlation function of both spike trains in
# spike_detector-0-0-3.gdf. The file is generated after running the
# testscript testsuite/unittests/test_mip_corrdet.sli
#
# Author: Helias
# Date: 08-04-07
#
from scipy import *
from matplotlib.pylab import * # for plot
# Auto- and crosscorrelation functions for spike trains.
#
# A time bin of size tbin is centered around the time difference it
# represents. If the correlation function is calculated for tau in
# [-tau_max, tau_max], the pair events contributing to the left-most
# bin are those for which tau in [-tau_max-tbin/2, -tau_max+tbin/2) and
# so on.
# correlate two spike trains with each other
# assumes spike times to be ordered in time
# tau > 0 means spike2 is later than spike1
#
# tau_max: maximum time lag in ms correlation function
# tbin: bin size
# spike1: first spike train [tspike...]
# spike2: second spike train [tspike...]
#
def corr_spikes_sorted(spike1, spike2, tbin, tau_max, h):
tau_max_i = int(tau_max/h)
tbin_i = int(tbin/h)
cross = zeros(int(2*tau_max_i/tbin_i+1), 'd')
j0 = 0
for spki in spike1:
j = j0
while j < len(spike2) and spike2[j] - spki < -tau_max_i - tbin_i/2.0:
j += 1
j0 = j
while j < len(spike2) and spike2[j] - spki < tau_max_i + tbin_i/2.0:
cross[int((spike2[j] - spki + tau_max_i + 0.5*tbin_i)/tbin_i)] += 1.0
j += 1
return cross
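# Small self-check (added for illustration; it is not part of the original
# NEST script and uses made-up spike times): correlating a regular spike
# train with itself must put one coincidence per spike into the central
# (zero-lag) bin. It can be run by hand before main().
def check_corr_spikes_sorted():
    h = 0.1
    spikes = arange(0.0, 1000.0, 100.0) / h  # 10 spikes, 100 ms apart, in units of h
    cc = corr_spikes_sorted(spikes, spikes, 10.0, 100.0, h)
    assert cc[len(cc) // 2] == len(spikes)
    return cc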
def main():
# resolution
h = 0.1
tau_max = 100.0 # ms correlation window
t_bin = 10.0 # ms bin size
# read input from spike detector
spikes = load('spike_detector-0-0-3.gdf')
sp1 = spikes[find(spikes[:,0] == 4), 1]
sp2 = spikes[find(spikes[:,0] == 5), 1]
cross = corr_spikes_sorted(sp1, sp2, t_bin, tau_max, h)
print cross
print sum(cross)
main()
| gpl-2.0 |
silky/sms-tools | lectures/09-Sound-description/plots-code/hpcp.py | 25 | 1194 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
spectralPeaks = ess.SpectralPeaks()
hpcp = ess.HPCP()
x = ess.MonoLoader(filename = '../../../sounds/cello-double.wav', sampleRate = fs)()
hpcps = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
spectralPeaks_freqs, spectralPeaks_mags = spectralPeaks(mX)
hpcp_vals = hpcp(spectralPeaks_freqs, spectralPeaks_mags)
hpcps.append(hpcp_vals)
hpcps = np.array(hpcps)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (cello-double.wav)')
plt.subplot(2,1,2)
numFrames = int(hpcps[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.pcolormesh(frmTime, np.arange(12), np.transpose(hpcps))
plt.ylabel('spectral bins')
plt.title('HPCP')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('hpcp.png')
plt.show()
| agpl-3.0 |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Geneva_cont_NoRot/Geneva_cont_NoRot_0/fullgrid/UV1.py | 31 | 9315 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [0, #977
1, #991
2, #1026
5, #1216
91, #1218
6, #1239
7, #1240
8, #1243
9, #1263
10, #1304
11,#1308
12, #1397
13, #1402
14, #1406
16, #1486
17] #1531
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty UV Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_UV_Lines.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
robbymeals/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants bicluster with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
MohammedWasim/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
anntzer/scikit-learn | sklearn/linear_model/_theil_sen.py | 8 | 14803 | # -*- coding: utf-8 -*-
"""
A Theil-Sen Estimator for Multiple Linear Regression Model
"""
# Author: Florian Wilhelm <[email protected]>
#
# License: BSD 3 clause
import warnings
from itertools import combinations
import numpy as np
from scipy import linalg
from scipy.special import binom
from scipy.linalg.lapack import get_lapack_funcs
from joblib import Parallel, effective_n_jobs
from ._base import LinearModel
from ..base import RegressorMixin
from ..utils import check_random_state
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import delayed
from ..exceptions import ConvergenceWarning
_EPSILON = np.finfo(np.double).eps
def _modified_weiszfeld_step(X, x_old):
"""Modified Weiszfeld step.
This function defines one iteration step in order to approximate the
spatial median (L1 median). It is a form of an iteratively re-weighted
least squares method.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
x_old : ndarray of shape = (n_features,)
Current start vector.
Returns
-------
x_new : ndarray of shape (n_features,)
New iteration step.
References
----------
- On Computation of Spatial Median for Robust Data Mining, 2005
T. Kärkkäinen and S. Äyrämö
http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
"""
diff = X - x_old
diff_norm = np.sqrt(np.sum(diff ** 2, axis=1))
mask = diff_norm >= _EPSILON
# x_old equals one of our samples
is_x_old_in_X = int(mask.sum() < X.shape[0])
diff = diff[mask]
diff_norm = diff_norm[mask][:, np.newaxis]
quotient_norm = linalg.norm(np.sum(diff / diff_norm, axis=0))
if quotient_norm > _EPSILON: # to avoid division by zero
new_direction = (np.sum(X[mask, :] / diff_norm, axis=0)
/ np.sum(1 / diff_norm, axis=0))
else:
new_direction = 1.
quotient_norm = 1.
return (max(0., 1. - is_x_old_in_X / quotient_norm) * new_direction
+ min(1., is_x_old_in_X / quotient_norm) * x_old)
def _spatial_median(X, max_iter=300, tol=1.e-3):
"""Spatial median (L1 median).
The spatial median is member of a class of so-called M-estimators which
are defined by an optimization problem. Given a number of p points in an
n-dimensional space, the point x minimizing the sum of all distances to the
p other points is called spatial median.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
max_iter : int, default=300
Maximum number of iterations.
tol : float, default=1.e-3
Stop the algorithm if spatial_median has converged.
Returns
-------
spatial_median : ndarray of shape = (n_features,)
Spatial median.
n_iter : int
Number of iterations needed.
References
----------
- On Computation of Spatial Median for Robust Data Mining, 2005
T. Kärkkäinen and S. Äyrämö
http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
"""
if X.shape[1] == 1:
return 1, np.median(X.ravel(), keepdims=True)
tol **= 2 # We are computing the tol on the squared norm
spatial_median_old = np.mean(X, axis=0)
for n_iter in range(max_iter):
spatial_median = _modified_weiszfeld_step(X, spatial_median_old)
if np.sum((spatial_median_old - spatial_median) ** 2) < tol:
break
else:
spatial_median_old = spatial_median
else:
warnings.warn("Maximum number of iterations {max_iter} reached in "
"spatial median for TheilSen regressor."
"".format(max_iter=max_iter), ConvergenceWarning)
return n_iter, spatial_median
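# Illustration (not part of the scikit-learn sources): unlike the arithmetic
# mean, the spatial median is barely moved by a single far-away outlier. With
# made-up data
#
#     pts = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.], [100., 100.]])
#     _, sm = _spatial_median(pts)
#
# ``sm`` stays inside the unit square, while ``pts.mean(axis=0)`` is dragged
# out to roughly (20.4, 20.4) by the outlier.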
def _breakdown_point(n_samples, n_subsamples):
"""Approximation of the breakdown point.
Parameters
----------
n_samples : int
Number of samples.
n_subsamples : int
Number of subsamples to consider.
Returns
-------
breakdown_point : float
Approximation of breakdown point.
"""
return 1 - (0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1) +
n_subsamples - 1) / n_samples
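# Worked example (illustrative numbers, not part of the scikit-learn sources):
# for n_samples=100 and n_subsamples=2 the expression above evaluates to
# 1 - (sqrt(0.5) * 99 + 1) / 100, i.e. roughly 0.29, close to the well-known
# ~29.3% breakdown point of the classical two-point Theil-Sen estimator.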
def _lstsq(X, y, indices, fit_intercept):
"""Least Squares Estimator for TheilSenRegressor class.
This function calculates the least squares method on a subset of rows of X
and y defined by the indices array. Optionally, an intercept column is
added if intercept is set to true.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Design matrix, where n_samples is the number of samples and
n_features is the number of features.
y : ndarray of shape (n_samples,)
Target vector, where n_samples is the number of samples.
indices : ndarray of shape (n_subpopulation, n_subsamples)
Indices of all subsamples with respect to the chosen subpopulation.
fit_intercept : bool
Fit intercept or not.
Returns
-------
weights : ndarray of shape (n_subpopulation, n_features + intercept)
Solution matrix of n_subpopulation solved least square problems.
"""
fit_intercept = int(fit_intercept)
n_features = X.shape[1] + fit_intercept
n_subsamples = indices.shape[1]
weights = np.empty((indices.shape[0], n_features))
X_subpopulation = np.ones((n_subsamples, n_features))
# gelss need to pad y_subpopulation to be of the max dim of X_subpopulation
y_subpopulation = np.zeros((max(n_subsamples, n_features)))
lstsq, = get_lapack_funcs(('gelss',), (X_subpopulation, y_subpopulation))
for index, subset in enumerate(indices):
X_subpopulation[:, fit_intercept:] = X[subset, :]
y_subpopulation[:n_subsamples] = y[subset]
weights[index] = lstsq(X_subpopulation,
y_subpopulation)[1][:n_features]
return weights
class TheilSenRegressor(RegressorMixin, LinearModel):
"""Theil-Sen Estimator: robust multivariate regression model.
The algorithm calculates least square solutions on subsets with size
n_subsamples of the samples in X. Any value of n_subsamples between the
number of features and samples leads to an estimator with a compromise
between robustness and efficiency. Since the number of least square
solutions is "n_samples choose n_subsamples", it can be extremely large
and can therefore be limited with max_subpopulation. If this limit is
reached, the subsets are chosen randomly. In a final step, the spatial
median (or L1 median) is calculated of all least square solutions.
Read more in the :ref:`User Guide <theil_sen_regression>`.
Parameters
----------
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
max_subpopulation : int, default=1e4
Instead of computing with a set of cardinality 'n choose k', where n is
the number of samples and k is the number of subsamples (at least
number of features), consider only a stochastic subpopulation of a
given maximal size if 'n choose k' is larger than max_subpopulation.
For other than small problem sizes this parameter will determine
memory usage and runtime if n_subsamples is not changed.
n_subsamples : int, default=None
Number of samples to calculate the parameters. This is at least the
number of features (plus 1 if fit_intercept=True) and the number of
samples as a maximum. A lower number leads to a higher breakdown
point and a low efficiency while a high number leads to a low
breakdown point and a high efficiency. If None, take the
minimum number of subsamples leading to maximal robustness.
If n_subsamples is set to n_samples, Theil-Sen is identical to least
squares.
max_iter : int, default=300
Maximum number of iterations for the calculation of spatial median.
tol : float, default=1.e-3
Tolerance when calculating spatial median.
random_state : int, RandomState instance or None, default=None
A random number generator instance to define the state of the random
permutations generator. Pass an int for reproducible output across
multiple function calls.
See :term:`Glossary <random_state>`
n_jobs : int, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : bool, default=False
Verbose mode when fitting the model.
Attributes
----------
coef_ : ndarray of shape (n_features,)
Coefficients of the regression model (median of distribution).
intercept_ : float
Estimated intercept of regression model.
breakdown_ : float
Approximated breakdown point.
n_iter_ : int
Number of iterations needed for the spatial median.
n_subpopulation_ : int
Number of combinations taken into account from 'n choose k', where n is
the number of samples and k is the number of subsamples.
Examples
--------
>>> from sklearn.linear_model import TheilSenRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(
... n_samples=200, n_features=2, noise=4.0, random_state=0)
>>> reg = TheilSenRegressor(random_state=0).fit(X, y)
>>> reg.score(X, y)
0.9884...
>>> reg.predict(X[:1,])
array([-31.5871...])
References
----------
- Theil-Sen Estimators in a Multiple Linear Regression Model, 2009
Xin Dang, Hanxiang Peng, Xueqin Wang and Heping Zhang
http://home.olemiss.edu/~xdang/papers/MTSE.pdf
"""
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, copy_X=True,
max_subpopulation=1e4, n_subsamples=None, max_iter=300,
tol=1.e-3, random_state=None, n_jobs=None, verbose=False):
self.fit_intercept = fit_intercept
self.copy_X = copy_X
self.max_subpopulation = int(max_subpopulation)
self.n_subsamples = n_subsamples
self.max_iter = max_iter
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.verbose = verbose
def _check_subparams(self, n_samples, n_features):
n_subsamples = self.n_subsamples
if self.fit_intercept:
n_dim = n_features + 1
else:
n_dim = n_features
if n_subsamples is not None:
if n_subsamples > n_samples:
raise ValueError("Invalid parameter since n_subsamples > "
"n_samples ({0} > {1}).".format(n_subsamples,
n_samples))
if n_samples >= n_features:
if n_dim > n_subsamples:
plus_1 = "+1" if self.fit_intercept else ""
raise ValueError("Invalid parameter since n_features{0} "
"> n_subsamples ({1} > {2})."
"".format(plus_1, n_dim, n_samples))
else: # if n_samples < n_features
if n_subsamples != n_samples:
raise ValueError("Invalid parameter since n_subsamples != "
"n_samples ({0} != {1}) while n_samples "
"< n_features.".format(n_subsamples,
n_samples))
else:
n_subsamples = min(n_dim, n_samples)
if self.max_subpopulation <= 0:
raise ValueError("Subpopulation must be strictly positive "
"({0} <= 0).".format(self.max_subpopulation))
all_combinations = max(1, np.rint(binom(n_samples, n_subsamples)))
n_subpopulation = int(min(self.max_subpopulation, all_combinations))
return n_subsamples, n_subpopulation
def fit(self, X, y):
"""Fit linear model.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
random_state = check_random_state(self.random_state)
X, y = self._validate_data(X, y, y_numeric=True)
n_samples, n_features = X.shape
n_subsamples, self.n_subpopulation_ = self._check_subparams(n_samples,
n_features)
self.breakdown_ = _breakdown_point(n_samples, n_subsamples)
if self.verbose:
print("Breakdown point: {0}".format(self.breakdown_))
print("Number of samples: {0}".format(n_samples))
tol_outliers = int(self.breakdown_ * n_samples)
print("Tolerable outliers: {0}".format(tol_outliers))
print("Number of subpopulations: {0}".format(
self.n_subpopulation_))
# Determine indices of subpopulation
if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation:
indices = list(combinations(range(n_samples), n_subsamples))
else:
indices = [random_state.choice(n_samples, size=n_subsamples,
replace=False)
for _ in range(self.n_subpopulation_)]
n_jobs = effective_n_jobs(self.n_jobs)
index_list = np.array_split(indices, n_jobs)
weights = Parallel(n_jobs=n_jobs,
verbose=self.verbose)(
delayed(_lstsq)(X, y, index_list[job], self.fit_intercept)
for job in range(n_jobs))
weights = np.vstack(weights)
self.n_iter_, coefs = _spatial_median(weights,
max_iter=self.max_iter,
tol=self.tol)
if self.fit_intercept:
self.intercept_ = coefs[0]
self.coef_ = coefs[1:]
else:
self.intercept_ = 0.
self.coef_ = coefs
return self
| bsd-3-clause |
bigdataelephants/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
artdavis/eggloft | eggcarrier/data/parse_profile.py | 1 | 1134 | #! /usr/bin/env python
"""
:mod:`parse_profile` -- Convert unstructured profile data to xy coords
===============================================================================
.. module:: parse_profile
:synopsis: Convert unstructured profile data to xy coords
.. moduleauthor:: Arthur Davis <[email protected]>
Copyright 2017 Arthur Davis ([email protected])
Licensed under the MIT License
2017-04-02 - File creation
"""
import os
import numpy as np
import pandas as pd
# Setup to use breakpt() for dropping into ipdb:
import IPython
breakpt = IPython.core.debugger.set_trace
CWD=os.path.dirname(os.path.abspath(__file__))
MODNAME = os.path.splitext(os.path.basename(__file__))[0]
with open('egg-profile_data.csv', 'r') as fid:
raw = fid.read().strip()
i = iter(raw.split(','))
x = []
y = []
try:
while True:
x.append(float(next(i)))
y.append(float(next(i)))
except StopIteration:
pass
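# The loop above assumes the raw profile file is one long comma-separated
# run of alternating x,y values, e.g. (hypothetical numbers)
# "0.0,12.5,0.4,12.1,0.9,11.8", rather than one coordinate pair per line.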
xpts = np.array(x)
ypts = -np.array(y)
# Set zero origin
xpts = xpts - np.min(xpts)
ypts = ypts - np.min(ypts)
df = pd.DataFrame({'x': xpts, 'y': ypts})
df.to_csv('egg-profile_xy.csv', index=False)
| mit |
derricw/pyqtgraph | pyqtgraph/widgets/MatplotlibWidget.py | 30 | 1442 | from ..Qt import QtGui, QtCore, USE_PYSIDE, USE_PYQT5
import matplotlib
if not USE_PYQT5:
if USE_PYSIDE:
matplotlib.rcParams['backend.qt4']='PySide'
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
else:
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class MatplotlibWidget(QtGui.QWidget):
"""
Implements a Matplotlib figure inside a QWidget.
    Use getFigure() and draw() to interact with matplotlib.
Example::
mw = MatplotlibWidget()
subplot = mw.getFigure().add_subplot(111)
subplot.plot(x,y)
mw.draw()
"""
def __init__(self, size=(5.0, 4.0), dpi=100):
QtGui.QWidget.__init__(self)
self.fig = Figure(size, dpi=dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.toolbar = NavigationToolbar(self.canvas, self)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addWidget(self.toolbar)
self.vbox.addWidget(self.canvas)
self.setLayout(self.vbox)
def getFigure(self):
return self.fig
def draw(self):
self.canvas.draw()
| mit |
diegocavalca/Studies | programming/Python/Machine-Learning/Introduction-Udacity/final_project/tester.py | 14 | 4509 | #!/usr/bin/pickle
""" a basic script for importing student's POI identifier,
and checking the results that they get from it
requires that the algorithm, dataset, and features list
be written to my_classifier.pkl, my_dataset.pkl, and
my_feature_list.pkl, respectively
that process should happen at the end of poi_id.py
"""
import pickle
import sys
from sklearn.cross_validation import StratifiedShuffleSplit
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
PERF_FORMAT_STRING = "\
\tAccuracy: {:>0.{display_precision}f}\tPrecision: {:>0.{display_precision}f}\t\
Recall: {:>0.{display_precision}f}\tF1: {:>0.{display_precision}f}\tF2: {:>0.{display_precision}f}"
RESULTS_FORMAT_STRING = "\tTotal predictions: {:4d}\tTrue positives: {:4d}\tFalse positives: {:4d}\
\tFalse negatives: {:4d}\tTrue negatives: {:4d}"
def test_classifier(clf, dataset, feature_list, folds = 1000):
data = featureFormat(dataset, feature_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
cv = StratifiedShuffleSplit(labels, folds, random_state = 42)
true_negatives = 0
false_negatives = 0
true_positives = 0
false_positives = 0
for train_idx, test_idx in cv:
features_train = []
features_test = []
labels_train = []
labels_test = []
for ii in train_idx:
features_train.append( features[ii] )
labels_train.append( labels[ii] )
for jj in test_idx:
features_test.append( features[jj] )
labels_test.append( labels[jj] )
### fit the classifier using training set, and test on test set
clf.fit(features_train, labels_train)
predictions = clf.predict(features_test)
for prediction, truth in zip(predictions, labels_test):
if prediction == 0 and truth == 0:
true_negatives += 1
elif prediction == 0 and truth == 1:
false_negatives += 1
elif prediction == 1 and truth == 0:
false_positives += 1
elif prediction == 1 and truth == 1:
true_positives += 1
else:
print "Warning: Found a predicted label not == 0 or 1."
print "All predictions should take value 0 or 1."
print "Evaluating performance for processed predictions:"
break
try:
total_predictions = true_negatives + false_negatives + false_positives + true_positives
accuracy = 1.0*(true_positives + true_negatives)/total_predictions
precision = 1.0*true_positives/(true_positives+false_positives)
recall = 1.0*true_positives/(true_positives+false_negatives)
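        # F-beta = (1 + beta^2) * precision * recall / (beta^2 * precision + recall):
        # f1 below is the beta=1 case, f2 the beta=2 case (recall weighted more heavily).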
f1 = 2.0 * true_positives/(2*true_positives + false_positives+false_negatives)
f2 = (1+2.0*2.0) * precision*recall/(4*precision + recall)
print clf
print PERF_FORMAT_STRING.format(accuracy, precision, recall, f1, f2, display_precision = 5)
print RESULTS_FORMAT_STRING.format(total_predictions, true_positives, false_positives, false_negatives, true_negatives)
print ""
except:
print "Got a divide by zero when trying out:", clf
print "Precision or recall may be undefined due to a lack of true positive predicitons."
CLF_PICKLE_FILENAME = "my_classifier.pkl"
DATASET_PICKLE_FILENAME = "my_dataset.pkl"
FEATURE_LIST_FILENAME = "my_feature_list.pkl"
def dump_classifier_and_data(clf, dataset, feature_list):
with open(CLF_PICKLE_FILENAME, "w") as clf_outfile:
pickle.dump(clf, clf_outfile)
with open(DATASET_PICKLE_FILENAME, "w") as dataset_outfile:
pickle.dump(dataset, dataset_outfile)
with open(FEATURE_LIST_FILENAME, "w") as featurelist_outfile:
pickle.dump(feature_list, featurelist_outfile)
def load_classifier_and_data():
with open(CLF_PICKLE_FILENAME, "r") as clf_infile:
clf = pickle.load(clf_infile)
with open(DATASET_PICKLE_FILENAME, "r") as dataset_infile:
dataset = pickle.load(dataset_infile)
with open(FEATURE_LIST_FILENAME, "r") as featurelist_infile:
feature_list = pickle.load(featurelist_infile)
return clf, dataset, feature_list
def main():
### load up student's classifier, dataset, and feature_list
clf, dataset, feature_list = load_classifier_and_data()
### Run testing script
test_classifier(clf, dataset, feature_list)
if __name__ == '__main__':
main()
| cc0-1.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/numpydoc/plot_directive.py | 89 | 20530 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
    (If None or empty, file names are relative to the directory where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
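For example, a project's ``conf.py`` might set (values here are illustrative only)::
    plot_include_source = True
    plot_pre_code = "import numpy as np; import matplotlib.pyplot as plt"
    plot_formats = [('png', 100), 'pdf']
    plot_html_show_formats = False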
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
from __future__ import division, absolute_import, print_function
import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
import sphinx
from io import StringIO
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
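# reST template rendered once per code piece: the (optional) source code, an
# HTML-only block with per-format download links, and a LaTeX-only block embedding the PDFs.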
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
    # ensure that LaTeX includegraphics doesn't choke on foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in list(options.items())
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
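# Each returned piece ends at a plt.show() call; makefig() below executes the pieces
# in order and captures whatever figures are open after each one as separate images.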
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec(setup.config.plot_pre_code, ns)
exec(code, ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in range(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
                except BaseException as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
| gpl-3.0 |
geektoni/shogun | applications/easysvm/esvm/plots.py | 7 | 6555 | """
This module contains code for commonly used plots
"""
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Soeren Sonnenburg
import sys
import random
import numpy
import warnings
import shutil
from shogun import Labels
from shogun import *
def plotroc(output, LTE, draw_random=False, figure_fname="", roc_label='ROC'):
"""Plot the receiver operating characteristic curve"""
import pylab
import matplotlib
pylab.figure(1,dpi=150,figsize=(4,4))
fontdict=dict(family="cursive",weight="bold",size=7,y=1.05) ;
pm=PerformanceMeasures(Labels(numpy.array(LTE)), Labels(numpy.array(output)))
points=pm.get_ROC()
points=numpy.array(points).T # for pylab.plot
pylab.plot(points[0], points[1], 'b-', label=roc_label)
if draw_random:
pylab.plot([0, 1], [0, 1], 'r-', label='random guessing')
pylab.axis([0, 1, 0, 1])
ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
pylab.xticks(ticks,size=10)
pylab.yticks(ticks,size=10)
pylab.xlabel('1 - specificity (false positive rate)',size=10)
pylab.ylabel('sensitivity (true positive rate)',size=10)
pylab.legend(loc='lower right', prop = matplotlib.font_manager.FontProperties('tiny'))
if figure_fname!=None:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
auROC=pm.get_auROC()
return auROC ;
def plotprc(output, LTE, figure_fname="", prc_label='PRC'):
"""Plot the precision recall curve"""
import pylab
import matplotlib
pylab.figure(2,dpi=150,figsize=(4,4))
pm=PerformanceMeasures(Labels(numpy.array(LTE)), Labels(numpy.array(output)))
points=pm.get_PRC()
points=numpy.array(points).T # for pylab.plot
pylab.plot(points[0], points[1], 'b-', label=prc_label)
pylab.axis([0, 1, 0, 1])
ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
pylab.xticks(ticks,size=10)
pylab.yticks(ticks,size=10)
pylab.xlabel('sensitivity (true positive rate)',size=10)
pylab.ylabel('precision (1 - false discovery rate)',size=10)
pylab.legend(loc='lower right')
if figure_fname!=None:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
auPRC=pm.get_auPRC()
return auPRC ;
def plotcloud(cloud, figure_fname="", label='cloud'):
"""Plot the cloud of points (the first two dimensions only)"""
import pylab
import matplotlib
pylab.figure(1,dpi=150,figsize=(4,4))
pos = []
neg = []
for i in xrange(len(cloud)):
if cloud[i][0]==1:
pos.append(cloud[i][1:])
elif cloud[i][0]==-1:
neg.append(cloud[i][1:])
fontdict=dict(family="cursive",weight="bold",size=10,y=1.05) ;
pylab.title(label, fontdict)
points=numpy.array(pos).T # for pylab.plot
pylab.plot(points[0], points[1], 'b+', label='positive')
points=numpy.array(neg).T # for pylab.plot
pylab.plot(points[0], points[1], 'rx', label='negative')
#pylab.axis([0, 1, 0, 1])
#ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
#pylab.xticks(ticks,size=10)
#pylab.yticks(ticks,size=10)
pylab.xlabel('dimension 1',size=10)
pylab.ylabel('dimension 2',size=10)
pylab.legend(loc='lower right')
if figure_fname!=None:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
def plot_poims(poimfilename, poim, max_poim, diff_poim, poim_totalmass, poimdegree, max_len):
"""Plot a summary of the information in poims"""
import pylab
import matplotlib
pylab.figure(3, dpi=150, figsize=(4,5))
# summary figures
fontdict=dict(family="cursive",weight="bold",size=7,y=1.05) ;
pylab.subplot(3,2,1)
pylab.title('Total POIM Mass', fontdict)
pylab.plot(poim_totalmass) ;
pylab.ylabel('weight mass', size=5)
pylab.subplot(3,2,3)
pylab.title('POIMs', fontdict)
pylab.pcolor(max_poim, shading='flat') ;
pylab.subplot(3,2,5)
pylab.title('Differential POIMs', fontdict)
pylab.pcolor(diff_poim, shading='flat') ;
for plot in [3, 5]:
pylab.subplot(3,2,plot)
ticks=numpy.arange(1., poimdegree+1, 1, dtype=numpy.float64)
ticks_str = []
for i in xrange(0, poimdegree):
ticks_str.append("%i" % (i+1))
ticks[i] = i + 0.5
pylab.yticks(ticks, ticks_str)
pylab.ylabel('degree', size=5)
# per k-mer figures
fontdict=dict(family="cursive",weight="bold",size=7,y=1.04) ;
# 1-mers
pylab.subplot(3,2,2)
pylab.title('1-mer Positional Importance', fontdict)
pylab.pcolor(poim[0], shading='flat') ;
ticks_str = ['A', 'C', 'G', 'T']
ticks = [0.5, 1.5, 2.5, 3.5]
pylab.yticks(ticks, ticks_str, size=5)
pylab.axis([0, max_len, 0, 4])
# 2-mers
pylab.subplot(3,2,4)
pylab.title('2-mer Positional Importance', fontdict)
pylab.pcolor(poim[1], shading='flat') ;
i=0 ;
ticks=[] ;
ticks_str=[] ;
for l1 in ['A', 'C', 'G', 'T']:
for l2 in ['A', 'C', 'G', 'T']:
ticks_str.append(l1+l2)
ticks.append(0.5+i) ;
i+=1 ;
pylab.yticks(ticks, ticks_str, fontsize=5)
pylab.axis([0, max_len, 0, 16])
# 3-mers
pylab.subplot(3,2,6)
pylab.title('3-mer Positional Importance', fontdict)
pylab.pcolor(poim[2], shading='flat') ;
i=0 ;
ticks=[] ;
ticks_str=[] ;
for l1 in ['A', 'C', 'G', 'T']:
for l2 in ['A', 'C', 'G', 'T']:
for l3 in ['A', 'C', 'G', 'T']:
if numpy.mod(i,4)==0:
ticks_str.append(l1+l2+l3)
ticks.append(0.5+i) ;
i+=1 ;
pylab.yticks(ticks, ticks_str, fontsize=5)
pylab.axis([0, max_len, 0, 64])
# x-axis on last two figures
for plot in [5, 6]:
pylab.subplot(3,2,plot)
pylab.xlabel('sequence position', size=5)
# finishing up
for plot in xrange(0,6):
pylab.subplot(3,2,plot+1)
pylab.xticks(fontsize=5)
for plot in [1,3,5]:
pylab.subplot(3,2,plot)
pylab.yticks(fontsize=5)
pylab.subplots_adjust(hspace=0.35) ;
# write to file
warnings.filterwarnings('ignore','Could not match*')
pylab.savefig('/tmp/temppylabfig.png')
shutil.move('/tmp/temppylabfig.png',poimfilename)
| bsd-3-clause |
jmmease/pandas | pandas/tests/series/test_quantile.py | 5 | 6023 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
from pandas import Index, Series
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.dtypes.common import is_integer
import pandas.util.testing as tm
from .common import TestData
class TestSeriesQuantile(TestData):
def test_quantile(self):
q = self.ts.quantile(0.1)
assert q == np.percentile(self.ts.valid(), 10)
q = self.ts.quantile(0.9)
assert q == np.percentile(self.ts.valid(), 90)
# object dtype
q = Series(self.ts, dtype=object).quantile(0.9)
assert q == np.percentile(self.ts.valid(), 90)
# datetime64[ns] dtype
dts = self.ts.index.to_series()
q = dts.quantile(.2)
assert q == Timestamp('2000-01-10 19:12:00')
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(.25)
assert q == pd.to_timedelta('24:00:00')
# GH7661
result = Series([np.timedelta64('NaT')]).sum()
assert result is pd.NaT
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assert_raises_regex(ValueError, msg):
self.ts.quantile(invalid)
def test_quantile_multi(self):
qs = [.1, .9]
result = self.ts.quantile(qs)
expected = pd.Series([np.percentile(self.ts.valid(), 10),
np.percentile(self.ts.valid(), 90)],
index=qs, name=self.ts.name)
tm.assert_series_equal(result, expected)
dts = self.ts.index.to_series()
dts.name = 'xxx'
result = dts.quantile((.2, .2))
expected = Series([Timestamp('2000-01-10 19:12:00'),
Timestamp('2000-01-10 19:12:00')],
index=[.2, .2], name='xxx')
tm.assert_series_equal(result, expected)
result = self.ts.quantile([])
expected = pd.Series([], name=self.ts.name, index=Index(
[], dtype=float))
tm.assert_series_equal(result, expected)
def test_quantile_interpolation(self):
# see gh-10174
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
assert q == np.percentile(self.ts.valid(), 10)
q1 = self.ts.quantile(0.1)
assert q1 == np.percentile(self.ts.valid(), 10)
# test with and without interpolation keyword
assert q == q1
def test_quantile_interpolation_dtype(self):
# GH #10174
# interpolation = linear (default case)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='lower')
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='higher')
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
def test_quantile_nan(self):
# GH 13098
s = pd.Series([1, 2, 3, 4, np.nan])
result = s.quantile(0.5)
expected = 2.5
assert result == expected
# all nan/empty
cases = [Series([]), Series([np.nan, np.nan])]
for s in cases:
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))
res = s.quantile([0.2, 0.3])
tm.assert_series_equal(res, pd.Series([np.nan, np.nan],
index=[0.2, 0.3]))
def test_quantile_box(self):
cases = [[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
[pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
[pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')],
# NaT
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'), pd.NaT],
[pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern'), pd.NaT],
[pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days'), pd.NaT]]
for case in cases:
s = pd.Series(case, name='XXX')
res = s.quantile(0.5)
assert res == case[1]
res = s.quantile([0.5])
exp = pd.Series([case[1]], index=[0.5], name='XXX')
tm.assert_series_equal(res, exp)
def test_datetime_timedelta_quantiles(self):
# covers #9694
assert pd.isna(Series([], dtype='M8[ns]').quantile(.5))
assert pd.isna(Series([], dtype='m8[ns]').quantile(.5))
def test_quantile_nat(self):
res = Series([pd.NaT, pd.NaT]).quantile(0.5)
assert res is pd.NaT
res = Series([pd.NaT, pd.NaT]).quantile([0.5])
tm.assert_series_equal(res, pd.Series([pd.NaT], index=[0.5]))
def test_quantile_empty(self):
# floats
s = Series([], dtype='float64')
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# int
s = Series([], dtype='int64')
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# datetime
s = Series([], dtype='datetime64[ns]')
res = s.quantile(0.5)
assert res is pd.NaT
res = s.quantile([0.5])
exp = Series([pd.NaT], index=[0.5])
tm.assert_series_equal(res, exp)
| bsd-3-clause |