prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90) |
---|---|---|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, date_range, timedelta_range
import pandas._testing as tm
class TestTimeSeries:
def test_contiguous_boolean_preserve_freq(self):
rng = date_range("1/1/2000", "3/1/2000", freq="B")
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
assert expected.freq == rng.freq
tm.assert_index_equal(masked, expected)
mask[22] = True
masked = rng[mask]
assert masked.freq is None
def test_promote_datetime_date(self):
rng = date_range("1/1/2000", periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq("4H", method="ffill")
expected = ts[5:].asfreq("4H", method="ffill")
tm.assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
tm.assert_numpy_array_equal(result, expected)
def test_series_map_box_timedelta(self):
# GH 11349
s = Series(timedelta_range("1 day 1 s", periods=5, freq="h"))
def f(x):
return x.total_seconds()
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_view_tz(self):
# GH#24024
ser = Series(pd.date_range("2000", periods=4, tz="US/Central"))
result = ser.view("i8")
expected = Series(
[
946706400000000000,
946792800000000000,
946879200000000000,
946965600000000000,
]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_asarray_object_dt64(self, tz):
ser = Series(pd.date_range("2000", periods=2, tz=tz))
with tm.assert_produces_warning(None):
# Future behavior (for tzaware case) with no warning
result = np.asarray(ser, dtype=object)
expected = np.array(
[pd.Timestamp("2000-01-01", tz=tz), pd.Timestamp("2000-01-02", tz=tz)]
)
tm.assert_numpy_array_equal(result, expected)
def test_asarray_tz_naive(self):
# This shouldn't produce a warning.
ser = Series( | pd.date_range("2000", periods=2) | pandas.date_range |
import os
import warnings
import pandas
import numpy as np
IDR_PATH = os.getenv("ZTFIDRPATH", "./dr2")
__all__ = ["get_targets_data",
"get_host_data",
"get_autotyping"]
# ================== #
# #
# TOP LEVEL #
# #
# ================== #
def get_targets_data():
""" """
redshifts = get_redshif_data()[["redshift","redshift_err", "redshift_source"]]
salt2params = get_salt2params()
coords = get_coords_data()
# merging
data_ = pandas.merge(redshifts,salt2params, left_index=True, right_index=True,
suffixes=("","_salt"), how="outer")
data_ = pandas.merge(data_, coords, left_index=True, right_index=True,
how="outer")
# set index
return data_
def get_host_data():
""" """
hostmags = get_host_mags()
hostcoords = get_host_coords()
return pandas.merge( pandas.concat([hostcoords], keys=["global"], axis=1), # multi index
hostmags, left_index=True, right_index=True, how="outer")
# ================== #
# #
# BASICS #
# #
# ================== #
# Master List
def get_masterlist(load=True, **kwargs):
""" """
filepath = os.path.join(IDR_PATH, "tables",
"ztfdr2_masterlist.csv")
if not load:
return filepath
return pandas.read_csv(filepath, **kwargs)
# Redshifts
def get_redshif_data(load=True, index_col=0, **kwargs):
""" """
filepath = os.path.join(IDR_PATH, "tables",
"ztfdr2_redshifts.csv")
if not load:
return filepath
data = pandas.read_csv(filepath, index_col=index_col, **kwargs)
data.index.name = 'ztfname'
return data
def get_snidauto_redshift(load=True, **kwargs):
""" """
filepath = os.path.join(IDR_PATH,"tables",
"ancilliary_info","snidauto_redshift.csv")
if not load:
return filepath
return pandas.read_csv(filepath, index_col=0, **kwargs)
# Coordinates
def get_coords_data(load=True, index_col=0, **kwargs):
""" """
filepath = os.path.join(IDR_PATH, "tables",
"ztfdr2_coordinates.csv")
if not load:
return filepath
return pandas.read_csv(filepath, index_col=index_col, **kwargs)
# SALT
def get_salt2params(load=True, default=True, **kwargs):
""" """
filename = "ztfdr2_salt2_params.csv" if default else "ztfdr2_salt2_params_phase-15to30_color-0.4to0.8.csv"
filepath = os.path.join(IDR_PATH, "tables",
filename)
if not load:
return filepath
return pandas.read_csv(filepath, **kwargs
).rename({"z":"redshift"}, axis=1
).set_index("ztfname")
# ================== #
# #
# HOST #
# #
# ================== #
def get_host_coords(load=True, **kwargs):
""" """
filepath = os.path.join(IDR_PATH, "tables",
"ancilliary_info/host_photometry/ztfdr2_hostcoords.csv")
if not load:
return filepath
return pandas.read_csv(filepath, **{**dict(sep=" "),**kwargs}).set_index("ztfname")
def get_host_mags(load=True, index_col=0, raw=False, **kwargs):
""" """
filepath = os.path.join(IDR_PATH, "tables",
"ancilliary_info/host_photometry/ztfdr2_hostmags.csv")
if not load:
return filepath
host_alldata = pandas.read_csv(filepath, index_col=index_col, **kwargs)
if raw:
return host_alldata
def _get_split_(which):
requested = f"{which}_mag"
if requested not in host_alldata:
raise ValueError(f"Unknown entry: {requested} (which={which})")
host_data = host_alldata[requested].astype(str).str.replace("nan", "np.nan").str.replace("inf", "np.nan").apply(eval)
host_err = host_alldata[requested+"_err"].astype(str).str.replace("nan", "np.nan").str.replace("inf", "np.nan").apply(eval)
flagna = host_data.isna() + host_err.isna()
data = pandas.DataFrame(list(host_data[~flagna]), index=host_data[~flagna].index)
error = pandas.DataFrame(list(host_err[~flagna]), index=host_err[~flagna].index)
error.columns += "_err"
return pandas.merge(data, error, left_index=True, right_index=True)
kpc2 = _get_split_(which="local_2kpc")
kpc4 = _get_split_(which="local_4kpc")
host_cat = _get_split_(which="host_cat")
hglobal = pandas.merge(host_cat,
host_alldata[["z","host_dlr"]].rename({"z":"redshift"}, axis=1),
left_index=True, right_index=True,
how="outer")
return pandas.concat([kpc2, kpc4, hglobal], axis=1,
keys=["2kpc", "4kpc", "global"])
# ================== #
# #
# LIGHTCURVES #
# #
# ================== #
def get_target_lc(target, test_exist=True):
""" """
fullpath = os.path.join(IDR_PATH, "lightcurves", f"{target}_LC.csv")
if test_exist:
if not os.path.isfile(fullpath):
warnings.warn(f"No lc file for {target} ; {fullpath}")
return None
return fullpath
def get_phase_coverage(load=True, warn=True, **kwargs):
""" """
filepath = os.path.join(IDR_PATH, "tables", "phase_coverage.csv")
if not load:
return filepath
if not os.path.isfile(filepath):
if warn:
warnings.warn(
"No phase_coverage file. build one using ztfidr.Sample().build_phase_coverage(store=True)")
return None
return pandas.read_csv(filepath, index_col=0, **kwargs
).reset_index().rename({"index": "name"}, axis=1
).set_index(["name", "filter"])["phase"]
# ================== #
# #
# Spectra #
# #
# ================== #
def get_autotyping(load=True, index_col=0, **kwargs):
""" """
filepath = os.path.join(IDR_PATH, "tables",
"autotyping.csv")
if not load:
return filepath
return pandas.read_csv(filepath, index_col=index_col, **kwargs)
def get_spectra_datafile(contains=None, startswith=None,
snidres=False, extension=None, use_dask=False):
""" """
from glob import glob
glob_format = "*" if not startswith else f"{startswith}*"
if snidres and extension is None:
extension = "_snid.h5"
elif extension is None:
extension = ".ascii"
if contains is not None:
glob_format += f"{contains}*"
if extension is not None:
glob_format += f"{extension}"
specfiles = glob(os.path.join(IDR_PATH, "spectra", glob_format))
datafile = pandas.DataFrame(specfiles, columns=["fullpath"])
datafile["basename"] = datafile["fullpath"].str.split(
"/", expand=True).iloc[:, -1]
return pandas.concat([datafile, parse_filename(datafile["basename"], snidres=snidres)], axis=1)
def parse_filename(file_s, snidres=False):
""" file or list of files.
Returns
-------
Series if single file, DataFrame otherwise
"""
index = ["ztfname", "date", "telescope", "version"]
fdata = []
for file_ in np.atleast_1d(file_s):
file_ = os.path.basename(file_).split(".ascii")[0]
if snidres:
#print(file_)
name, date, *telescope, origin, snid_ = file_.split("_")
else:
try:
name, date, *telescope, origin = file_.split("_")
except:
print(f"failed parsing filename for {file_}")
continue
telescope = "_".join(telescope)
fdata.append([name, date, telescope, origin])
if len(fdata) == 1:
return pandas.Series(fdata[0], index=index)
return | pandas.DataFrame(fdata, columns=index) | pandas.DataFrame |
#program to turn list of unzipped folders of templates etc from the NDA into something that more closely resembles a crosswalk
import os
import pandas as pd
from openpyxl import load_workbook
#specify the path to the folder that contains the unzipped folders from the NDA - end the path in '/'
#(or tell me how I can make this code robust to path variations)
#inout='/yourpath/NIH_toolbox_crosswalk_docs/'
inout='/home/petra/UbWinSharedSpace1/redcap2nda_Lifespan2019/NIH_toolbox_crosswalk_docs/HCPD/'
#open one of the folders that the NDA sent and find the file that contains the list of vars in
#your file and the list of vars in the NDA as one-to-one pairs
#for example, we have files with the column headers "NDA Element" and "HCP-D Element"
#we also have files with the column headers "NDA Element" and "HCP-A Element"
#provide the name of the local variable column:
localvar="HCP-D Element"
# get list of files and folders in the inout directory
# The string containing the name of each new folder from the NDA will be assigned to a
# variable called 'Instrument_Short' because, after filtering out all the other
# files/folders (list1 and list2 below), that's what you'll have:
# the NDA's shorthand name for the NIH Toolbox instrument
dirs=pd.DataFrame(os.listdir(inout),columns=['Instrument_Short'])
#initialize an empty crosswalk
crosswalk_meta=pd.DataFrame(columns=['Inst','Instrument_Short','key','template','requestfile','varmapfile'])
#create two lists for file extensions and/or folders in inout that you don't want to be treated
# as something to be added to a crosswalk
list1=['zip','csv','xlsx','pptx'] #file extensions in this folder of folders from Leo that you want to ignore
list2=['drafts','HCPD','temp','added_tocrosswalk','dummypass','almost trash','prepped_structures'] #identify folders you want to ignore
# for the items not in list1 or list2, read the folder contents and turn them into something appendable/mergeable
# four possible files:
# Mapping Key has the Full Name of the NDA structure to which this stuff will be mapped, the short name,
# and the name on the Form as we will be uploading it (Inst)
# One file in this folder has an Instrument_short.structure name format
# Occasionally you'll get a formRequest file which contains NDA verbal instructions that
# ultimately need to be translated into executable python code (more on this later).
# The template file is not used in the crosswalk, but may come in handy for debugging (i.e. if someone
# on either end of this process accidentally assigned the wrong aliases).
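# Illustrative sketch of what each per-folder cw row built below is expected to hold
# (all folder and file names here are made-up placeholders, not an actual NDA delivery):
#   Instrument_Short: 'ExampleInstrument'                  (the NDA's shorthand folder name)
#   key:              'ExampleInstrument Mapping Key.xlsx'
#   template:         'exampleinst01_template.csv'
#   varmapfile:       'exampleinst01.xlsx'
#   requestfile:      'exampleinst_formRequest.xlsx', or '' when no request file was sent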
for i in dirs.Instrument_Short:
for j in list1:
if j in i:
print('skipping file '+i)
i='dummypass'
if i in list2:
if i not in 'dummypass':
print('skipping folder '+i)
else:
requestfile='' #need to initialize this particular var because some folders don't have requests
cw = pd.DataFrame(columns=['Instrument_Short', 'key', 'template', 'requestfile', 'varmapfile'])
haystack=os.listdir(inout+i)
for j, element in enumerate(haystack):
if "Key" in element:
key=element
elif "template" in element:
template=element
elif "01.xlsx" in element:
varmapfile=element
elif 'equest' in element:
requestfile=element
cw.loc[0, ['Instrument_Short', 'key','template','varmapfile','requestfile']] = [i, key,template,varmapfile,requestfile]
wb = load_workbook(inout + cw.Instrument_Short[0] + '/' + cw.varmapfile[0])
ws = wb.active
df = | pd.DataFrame(ws.values) | pandas.DataFrame |
from flask import Flask, request, Response
from flask_restful import Resource
import pandas as pd
class GlobalFunctions():
global readCsv
def readCsv(fileName):
chunk = pd.read_csv(f'./unprocessedFiles/{fileName}',chunksize=100000)
df = pd.concat(chunk)
return df
class ReadCsvByName(Resource):
def post(self):
#Retrieve Parameters
json_data = request.get_json(force=True)
fileName = json_data['fileName']
pdDf = readCsv(fileName) #Reads CSV
pdDf.sort_values(by='Name',inplace=True,ascending=True) #Sorting Value by Name
pdDf.to_csv(path_or_buf=f'./processedFiles/processed_{fileName}_readCsvByName.csv') #Saving the file in local storage temporarily
return Response(pdDf.to_json(orient='records', lines=True).splitlines(),mimetype='application/json',status=200)
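# Hypothetical client call for the endpoint above (route registration is not shown in this
# snippet, so the URL path, port, and file name are assumptions):
#   import requests
#   resp = requests.post("http://localhost:5000/readCsvByName",
#                        json={"fileName": "stocks.csv"})
#   print(resp.status_code)   # 200 on success
#   print(resp.text[:200])    # body contains the records sorted by "Name"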
class RearrangeByOrder(Resource):
def post(self):
json_data = request.get_json(force=True)
fileName = json_data['fileName']
sortBy = json_data['sortBy']
order = True
if json_data['orderBy']=='desc':
order = False
pdDf = readCsv(fileName)
pdDf.sort_values(by=sortBy,inplace=True,ascending=order)
pdDf.to_csv(path_or_buf=f'./processedFiles/processed_{fileName}_rearrangeByOrder_{sortBy}.csv') #Saving the file in local storage temporarily
return Response(pdDf.to_json(orient='records', lines=True).splitlines(),mimetype='application/json',status=200)
class HighestClosingPrice(Resource):
def post(self):
json_data = request.get_json(force=True)
fileName = json_data['fileName']
fromDate = json_data['fromDate']
toDate = json_data['toDate']
stockName = json_data['name']
pdDf = readCsv(fileName)
pdDf['date'] = | pd.to_datetime(pdDf['date']) | pandas.to_datetime |
import itertools
import os
import random
import tempfile
from unittest import mock
import pandas as pd
import pytest
import pickle
import numpy as np
import string
import multiprocessing as mp
from copy import copy
import dask
import dask.dataframe as dd
from dask.dataframe._compat import tm, assert_categorical_equal
from dask import delayed
from dask.base import compute_as_if_collection
from dask.optimization import cull
from dask.dataframe.shuffle import (
shuffle,
partitioning_index,
rearrange_by_column,
rearrange_by_divisions,
maybe_buffered_partd,
remove_nans,
)
from dask.dataframe.utils import assert_eq, make_meta
from dask.dataframe._compat import PANDAS_GT_120
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [1, 4, 7]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [2, 5, 8]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [3, 6, 9]}, index=[9, 9, 9]),
}
meta = make_meta({"a": "i8", "b": "i8"}, index=pd.Index([], "i8"))
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
full = d.compute()
CHECK_FREQ = {}
if dd._compat.PANDAS_GT_110:
CHECK_FREQ["check_freq"] = False
shuffle_func = shuffle # conflicts with keyword argument
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_shuffle(shuffle):
s = shuffle_func(d, d.b, shuffle=shuffle)
assert isinstance(s, dd.DataFrame)
assert s.npartitions == d.npartitions
x = dask.get(s.dask, (s._name, 0))
y = dask.get(s.dask, (s._name, 1))
assert not (set(x.b) & set(y.b)) # disjoint
assert set(s.dask).issuperset(d.dask)
assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name
def test_default_partitions():
assert shuffle(d, d.b).npartitions == d.npartitions
def test_shuffle_npartitions_task():
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=10)
s = shuffle(ddf, ddf.x, shuffle="tasks", npartitions=17, max_branch=4)
sc = s.compute(scheduler="sync")
assert s.npartitions == 17
assert set(s.dask).issuperset(set(ddf.dask))
assert len(sc) == len(df)
assert list(s.columns) == list(df.columns)
assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_index_with_non_series(method):
from dask.dataframe.tests.test_multi import list_eq
list_eq(shuffle(d, d.b, shuffle=method), shuffle(d, "b", shuffle=method))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_index_with_dataframe(method):
res1 = shuffle(d, d[["b"]], shuffle=method).compute()
res2 = shuffle(d, ["b"], shuffle=method).compute()
res3 = shuffle(d, "b", shuffle=method).compute()
assert sorted(res1.values.tolist()) == sorted(res2.values.tolist())
assert sorted(res1.values.tolist()) == sorted(res3.values.tolist())
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_shuffle_from_one_partition_to_one_other(method):
df = pd.DataFrame({"x": [1, 2, 3]})
a = dd.from_pandas(df, 1)
for i in [1, 2]:
b = shuffle(a, "x", npartitions=i, shuffle=method)
assert len(a.compute(scheduler="sync")) == len(b.compute(scheduler="sync"))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_shuffle_empty_partitions(method):
df = pd.DataFrame({"x": [1, 2, 3] * 10})
ddf = dd.from_pandas(df, npartitions=3)
s = shuffle(ddf, ddf.x, npartitions=6, shuffle=method)
parts = compute_as_if_collection(dd.DataFrame, s.dask, s.__dask_keys__())
for p in parts:
assert s.columns == p.columns
df2 = pd.DataFrame(
{
"i32": np.array([1, 2, 3] * 3, dtype="int32"),
"f32": np.array([None, 2.5, 3.5] * 3, dtype="float32"),
"cat": pd.Series(["a", "b", "c"] * 3).astype("category"),
"obj": pd.Series(["d", "e", "f"] * 3),
"bool": np.array([True, False, True] * 3),
"dt": pd.Series(pd.date_range("20130101", periods=9)),
"dt_tz": pd.Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
"td": pd.Series(pd.timedelta_range("2000", periods=9)),
}
)
def test_partitioning_index():
res = partitioning_index(df2.i32, 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()
res = partitioning_index(df2[["i32"]], 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
res = partitioning_index(df2[["cat", "bool", "f32"]], 2)
assert ((0 <= res) & (res < 2)).all()
res = partitioning_index(df2.index, 4)
assert ((res < 4) & (res >= 0)).all()
assert len(np.unique(res)) > 1
def test_partitioning_index_categorical_on_values():
df = pd.DataFrame({"a": list(string.ascii_letters), "b": [1, 2, 3, 4] * 13})
df.a = df.a.astype("category")
df2 = df.copy()
df2.a = df2.a.cat.set_categories(list(reversed(df2.a.cat.categories)))
res = partitioning_index(df.a, 5)
res2 = partitioning_index(df2.a, 5)
assert (res == res2).all()
res = partitioning_index(df, 5)
res2 = partitioning_index(df2, 5)
assert (res == res2).all()
@pytest.mark.parametrize(
"npartitions", [1, 4, 7, pytest.param(23, marks=pytest.mark.slow)]
)
def test_set_index_tasks(npartitions):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
assert_eq(df.set_index("x"), ddf.set_index("x", shuffle="tasks"))
assert_eq(df.set_index("y"), ddf.set_index("y", shuffle="tasks"))
assert_eq(df.set_index(df.x), ddf.set_index(ddf.x, shuffle="tasks"))
assert_eq(df.set_index(df.x + df.y), ddf.set_index(ddf.x + ddf.y, shuffle="tasks"))
assert_eq(df.set_index(df.x + 1), ddf.set_index(ddf.x + 1, shuffle="tasks"))
assert_eq(df.set_index(df.index), ddf.set_index(ddf.index, shuffle="tasks"))
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_self_index(shuffle):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
a = dd.from_pandas(df, npartitions=4)
b = a.set_index(a.index, shuffle=shuffle)
assert a is b
assert_eq(b, df.set_index(df.index))
@pytest.mark.parametrize("shuffle", ["tasks"])
def test_set_index_names(shuffle):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=4)
assert set(ddf.set_index("x", shuffle=shuffle).dask) == set(
ddf.set_index("x", shuffle=shuffle).dask
)
assert set(ddf.set_index("x", shuffle=shuffle).dask) != set(
ddf.set_index("y", shuffle=shuffle).dask
)
assert set(ddf.set_index("x", max_branch=4, shuffle=shuffle).dask) != set(
ddf.set_index("x", max_branch=3, shuffle=shuffle).dask
)
assert set(ddf.set_index("x", drop=True, shuffle=shuffle).dask) != set(
ddf.set_index("x", drop=False, shuffle=shuffle).dask
)
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_tasks_2(shuffle):
df = dd.demo.make_timeseries(
"2000",
"2004",
{"value": float, "name": str, "id": int},
freq="2H",
partition_freq="1M",
seed=1,
)
df2 = df.set_index("name", shuffle=shuffle)
df2.value.sum().compute(scheduler="sync")
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_tasks_3(shuffle):
df = pd.DataFrame(np.random.random((10, 2)), columns=["x", "y"])
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, max_branch=2, npartitions=ddf.npartitions
)
df2 = df.set_index("x")
assert_eq(df2, ddf2)
assert ddf2.npartitions == ddf.npartitions
@pytest.mark.parametrize("shuffle", ["tasks", "disk"])
def test_shuffle_sort(shuffle):
df = pd.DataFrame({"x": [1, 2, 3, 2, 1], "y": [9, 8, 7, 1, 5]})
ddf = dd.from_pandas(df, npartitions=3)
df2 = df.set_index("x").sort_index()
ddf2 = ddf.set_index("x", shuffle=shuffle)
assert_eq(ddf2.loc[2:3], df2.loc[2:3])
@pytest.mark.parametrize("shuffle", ["tasks", "disk"])
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_rearrange(shuffle, scheduler):
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle=shuffle)
assert result.npartitions == ddf.npartitions
assert set(ddf.dask).issubset(result.dask)
# Every value in exactly one partition
a = result.compute(scheduler=scheduler)
get = dask.base.get_scheduler(scheduler=scheduler)
parts = get(result.dask, result.__dask_keys__())
for i in a._partitions.drop_duplicates():
assert sum(i in set(part._partitions) for part in parts) == 1
def test_rearrange_cleanup():
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
tmpdir = tempfile.mkdtemp()
with dask.config.set(temporary_directory=str(tmpdir)):
result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle="disk")
result.compute(scheduler="processes")
assert len(os.listdir(tmpdir)) == 0
def mock_shuffle_group_3(df, col, npartitions, p):
raise ValueError("Mock exception!")
def test_rearrange_disk_cleanup_with_exception():
# ensure temporary files are cleaned up when there's an internal exception.
with mock.patch("dask.dataframe.shuffle.shuffle_group_3", new=mock_shuffle_group_3):
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
tmpdir = tempfile.mkdtemp()
with dask.config.set(temporary_directory=str(tmpdir)):
with pytest.raises(ValueError, match="Mock exception!"):
result = rearrange_by_column(
ddf2, "_partitions", max_branch=32, shuffle="disk"
)
result.compute(scheduler="processes")
assert len(os.listdir(tmpdir)) == 0
def test_rearrange_by_column_with_narrow_divisions():
from dask.dataframe.tests.test_multi import list_eq
A = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": [1, 1, 2, 2, 3, 4]})
a = dd.repartition(A, [0, 4, 5])
df = rearrange_by_divisions(a, "x", (0, 2, 5))
list_eq(df, a)
def test_maybe_buffered_partd():
import partd
f = maybe_buffered_partd()
p1 = f()
assert isinstance(p1.partd, partd.Buffer)
f2 = pickle.loads(pickle.dumps(f))
assert not f2.buffer
p2 = f2()
assert isinstance(p2.partd, partd.File)
def test_set_index_with_explicit_divisions():
df = pd.DataFrame({"x": [4, 1, 2, 5]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2)
def throw(*args, **kwargs):
raise Exception()
with dask.config.set(get=throw):
ddf2 = ddf.set_index("x", divisions=[1, 3, 5])
assert ddf2.divisions == (1, 3, 5)
df2 = df.set_index("x")
assert_eq(ddf2, df2)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index("x", divisions=[3, 1, 5])
def test_set_index_divisions_2():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")})
ddf = dd.from_pandas(df, 2)
result = ddf.set_index("y", divisions=["a", "c", "d"])
assert result.divisions == ("a", "c", "d")
assert list(result.compute(scheduler="sync").index[-2:]) == ["d", "d"]
def test_set_index_divisions_compute():
d2 = d.set_index("b", divisions=[0, 2, 9], compute=False)
d3 = d.set_index("b", divisions=[0, 2, 9], compute=True)
assert_eq(d2, d3)
assert_eq(d2, full.set_index("b"))
assert_eq(d3, full.set_index("b"))
assert len(d2.dask) > len(d3.dask)
d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)
d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)
exp = full.copy()
exp.index = exp.b
assert_eq(d4, d5)
assert_eq(d4, exp)
assert_eq(d5, exp)
assert len(d4.dask) > len(d5.dask)
def test_set_index_divisions_sorted():
p1 = pd.DataFrame({"x": [10, 11, 12], "y": ["a", "a", "a"]})
p2 = pd.DataFrame({"x": [13, 14, 15], "y": ["b", "b", "c"]})
p3 = pd.DataFrame({"x": [16, 17, 18], "y": ["d", "e", "e"]})
ddf = dd.DataFrame(
{("x", 0): p1, ("x", 1): p2, ("x", 2): p3}, "x", p1, [None, None, None, None]
)
df = ddf.compute()
def throw(*args, **kwargs):
raise Exception("Shouldn't have computed")
with dask.config.set(get=throw):
res = ddf.set_index("x", divisions=[10, 13, 16, 18], sorted=True)
assert_eq(res, df.set_index("x"))
with dask.config.set(get=throw):
res = ddf.set_index("y", divisions=["a", "b", "d", "e"], sorted=True)
assert_eq(res, df.set_index("y"))
# with sorted=True, divisions must be same length as df.divisions
with pytest.raises(ValueError):
ddf.set_index("y", divisions=["a", "b", "c", "d", "e"], sorted=True)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index("y", divisions=["a", "b", "d", "c"], sorted=True)
@pytest.mark.slow
def test_set_index_consistent_divisions():
# See https://github.com/dask/dask/issues/3867
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=4)
ddf = ddf.clear_divisions()
ctx = mp.get_context("spawn")
pool = ctx.Pool(processes=8)
with pool:
results = [pool.apply_async(_set_index, (ddf, "x")) for _ in range(100)]
divisions_set = set(result.get() for result in results)
assert len(divisions_set) == 1
def _set_index(df, *args, **kwargs):
return df.set_index(*args, **kwargs).divisions
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_reduces_partitions_small(shuffle):
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=50)
ddf2 = ddf.set_index("x", shuffle=shuffle, npartitions="auto")
assert ddf2.npartitions < 10
def make_part(n):
return pd.DataFrame({"x": np.random.random(n), "y": np.random.random(n)})
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_reduces_partitions_large(shuffle):
nbytes = 1e6
nparts = 50
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame(
{("x", i): (make_part, n) for i in range(nparts)},
"x",
make_part(1),
[None] * (nparts + 1),
)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, npartitions="auto", partition_size=nbytes
)
assert 1 < ddf2.npartitions < 20
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_doesnt_increase_partitions(shuffle):
nparts = 2
nbytes = 1e6
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame(
{("x", i): (make_part, n) for i in range(nparts)},
"x",
make_part(1),
[None] * (nparts + 1),
)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, npartitions="auto", partition_size=nbytes
)
assert ddf2.npartitions <= ddf.npartitions
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_detects_sorted_data(shuffle):
df = pd.DataFrame({"x": range(100), "y": range(100)})
ddf = dd.from_pandas(df, npartitions=10, name="x", sort=False)
ddf2 = ddf.set_index("x", shuffle=shuffle)
assert len(ddf2.dask) < ddf.npartitions * 4
def test_set_index_sorts():
# https://github.com/dask/dask/issues/2288
vals = np.array(
[
1348550149000000000,
1348550149000000000,
1348558142000000000,
1348558142000000000,
1348585928000000000,
1348585928000000000,
1348600739000000000,
1348601706000000000,
1348600739000000000,
1348601706000000000,
1348614789000000000,
1348614789000000000,
1348621037000000000,
1348621038000000000,
1348621040000000000,
1348621037000000000,
1348621038000000000,
1348621040000000000,
1348637628000000000,
1348638159000000000,
1348638160000000000,
1348638159000000000,
1348638160000000000,
1348637628000000000,
1348646354000000000,
1348646354000000000,
1348659107000000000,
1348657111000000000,
1348659107000000000,
1348657111000000000,
1348672876000000000,
1348672876000000000,
1348682787000000000,
1348681985000000000,
1348682787000000000,
1348681985000000000,
1348728167000000000,
1348728167000000000,
1348730745000000000,
1348730745000000000,
1348750198000000000,
1348750198000000000,
1348750198000000000,
1348753539000000000,
1348753539000000000,
1348753539000000000,
1348754449000000000,
1348754449000000000,
1348761333000000000,
1348761554000000000,
1348761610000000000,
1348761333000000000,
1348761554000000000,
1348761610000000000,
1348782624000000000,
1348782624000000000,
1348782624000000000,
1348782624000000000,
]
)
vals = pd.to_datetime(vals, unit="ns")
breaks = [10, 36, 58]
dfs = []
for i in range(len(breaks)):
lo = sum(breaks[:i])
hi = sum(breaks[i : i + 1])
dfs.append(pd.DataFrame({"timestamp": vals[lo:hi]}, index=range(lo, hi)))
ddf = dd.concat(dfs).clear_divisions()
assert ddf.set_index("timestamp").index.compute().is_monotonic is True
def test_set_index():
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 2, 6]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 5, 8]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [9, 1, 8]}, index=[9, 9, 9]),
}
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
full = d.compute()
d2 = d.set_index("b", npartitions=3)
assert d2.npartitions == 3
assert d2.index.name == "b"
assert_eq(d2, full.set_index("b"))
d3 = d.set_index(d.b, npartitions=3)
assert d3.npartitions == 3
assert d3.index.name == "b"
assert_eq(d3, full.set_index(full.b))
d4 = d.set_index("b")
assert d4.index.name == "b"
assert_eq(d4, full.set_index("b"))
d5 = d.set_index(["b"])
assert d5.index.name == "b"
assert_eq(d5, full.set_index(["b"]))
@pytest.mark.parametrize("engine", ["pandas", "cudf"])
def test_set_index_interpolate(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
df = pd.DataFrame({"x": [4, 1, 1, 3, 3], "y": [1.0, 1, 1, 1, 2]})
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=3)
else:
d = dd.from_pandas(df, 2)
d1 = d.set_index("x", npartitions=3)
assert d1.npartitions == 3
assert set(d1.divisions) == set([1, 2, 4])
d2 = d.set_index("y", npartitions=3)
assert d2.divisions[0] == 1.0
assert 1.0 < d2.divisions[1] < d2.divisions[2] < 2.0
assert d2.divisions[3] == 2.0
@pytest.mark.parametrize("engine", ["pandas", "cudf"])
def test_set_index_interpolate_int(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
L = sorted(list(range(0, 200, 10)) * 2)
df = pd.DataFrame({"x": 2 * L})
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=2)
else:
d = dd.from_pandas(df, 2)
d1 = d.set_index("x", npartitions=10)
assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)
@pytest.mark.parametrize("engine", ["pandas", "cudf"])
def test_set_index_interpolate_large_uint(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
"""This test is for #7304"""
df = pd.DataFrame(
{"x": np.array([612509347682975743, 616762138058293247], dtype=np.uint64)}
)
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=2)
else:
d = dd.from_pandas(df, 1)
d1 = d.set_index("x", npartitions=1)
assert d1.npartitions == 1
assert set(d1.divisions) == set([612509347682975743, 616762138058293247])
def test_set_index_timezone():
s_naive = pd.Series(pd.date_range("20130101", periods=3))
s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
df = pd.DataFrame({"tz": s_aware, "notz": s_naive})
d = dd.from_pandas(df, 2)
d1 = d.set_index("notz", npartitions=1)
s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)
assert d1.divisions[0] == s_naive[0] == s1[0]
assert d1.divisions[-1] == s_naive[2] == s1[2]
# We currently lose "freq". Converting data with pandas-defined dtypes
# to numpy or pure Python can be lossy like this.
d2 = d.set_index("tz", npartitions=1)
s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)
assert d2.divisions[0] == s2[0]
assert d2.divisions[-1] == s2[2]
assert d2.divisions[0].tz == s2[0].tz
assert d2.divisions[0].tz is not None
s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)
if PANDAS_GT_120:
# starting with pandas 1.2.0, comparing equality of timestamps with different
# timezones returns False instead of raising an error
assert not d2.divisions[0] == s2badtype[0]
else:
with pytest.raises(TypeError):
d2.divisions[0] == s2badtype[0]
def test_set_index_npartitions():
# https://github.com/dask/dask/issues/6974
data = pd.DataFrame(
index=pd.Index(
["A", "A", "A", "A", "A", "A", "A", "A", "A", "B", "B", "B", "C"]
)
)
data = dd.from_pandas(data, npartitions=2)
output = data.reset_index().set_index("index", npartitions=1)
assert output.npartitions == 1
@pytest.mark.parametrize("unit", ["ns", "us"])
def test_set_index_datetime_precision(unit):
# https://github.com/dask/dask/issues/6864
df = pd.DataFrame(
[
[1567703791155681, 1],
[1567703792155681, 2],
[1567703790155681, 0],
[1567703793155681, 3],
],
columns=["ts", "rank"],
)
df.ts = pd.to_datetime(df.ts, unit=unit)
ddf = dd.from_pandas(df, npartitions=2)
ddf = ddf.set_index("ts")
assert_eq(ddf, df.set_index("ts"))
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop(drop):
pdf = pd.DataFrame(
{
"A": list("ABAABBABAA"),
"B": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"C": [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
}
)
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index("A", drop=drop), pdf.set_index("A", drop=drop))
assert_eq(ddf.set_index("B", drop=drop), pdf.set_index("B", drop=drop))
assert_eq(ddf.set_index("C", drop=drop), pdf.set_index("C", drop=drop))
assert_eq(ddf.set_index(ddf.A, drop=drop), pdf.set_index(pdf.A, drop=drop))
assert_eq(ddf.set_index(ddf.B, drop=drop), pdf.set_index(pdf.B, drop=drop))
assert_eq(ddf.set_index(ddf.C, drop=drop), pdf.set_index(pdf.C, drop=drop))
# numeric columns
pdf = pd.DataFrame(
{
0: list("ABAABBABAA"),
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
}
)
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index(0, drop=drop), pdf.set_index(0, drop=drop))
assert_eq(ddf.set_index(2, drop=drop), pdf.set_index(2, drop=drop))
def test_set_index_raises_error_on_bad_input():
df = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
msg = r"Dask dataframe does not yet support multi-indexes"
with pytest.raises(NotImplementedError) as err:
ddf.set_index(["a", "b"])
assert msg in str(err.value)
with pytest.raises(NotImplementedError) as err:
ddf.set_index([["a", "b"]])
assert msg in str(err.value)
with pytest.raises(NotImplementedError) as err:
ddf.set_index([["a"]])
assert msg in str(err.value)
def test_set_index_sorted_true():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]})
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = a.set_index("x", sorted=True)
assert b.known_divisions
assert set(a.dask).issubset(set(b.dask))
for drop in [True, False]:
assert_eq(a.set_index("x", drop=drop), df.set_index("x", drop=drop))
assert_eq(
a.set_index(a.x, sorted=True, drop=drop), df.set_index(df.x, drop=drop)
)
assert_eq(
a.set_index(a.x + 1, sorted=True, drop=drop),
df.set_index(df.x + 1, drop=drop),
)
with pytest.raises(ValueError):
a.set_index(a.z, sorted=True)
def test_set_index_sorted_single_partition():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(ddf.set_index("x", sorted=True), df.set_index("x"))
def test_set_index_sorted_min_max_same():
a = pd.DataFrame({"x": [1, 2, 3], "y": [0, 0, 0]})
b = pd.DataFrame({"x": [1, 2, 3], "y": [1, 1, 1]})
aa = delayed(a)
bb = delayed(b)
df = dd.from_delayed([aa, bb], meta=a)
assert not df.known_divisions
df2 = df.set_index("y", sorted=True)
assert df2.divisions == (0, 1, 1)
def test_set_index_empty_partition():
test_vals = [1, 2, 3]
converters = [int, float, str, lambda x: pd.to_datetime(x, unit="ns")]
for conv in converters:
df = pd.DataFrame(
[{"x": conv(i), "y": i} for i in test_vals], columns=["x", "y"]
)
ddf = dd.concat(
[
dd.from_pandas(df, npartitions=1),
dd.from_pandas(df[df.y > df.y.max()], npartitions=1),
]
)
assert any(ddf.get_partition(p).compute().empty for p in range(ddf.npartitions))
assert assert_eq(ddf.set_index("x"), df.set_index("x"))
def test_set_index_on_empty():
test_vals = [1, 2, 3, 4]
converters = [int, float, str, lambda x: pd.to_datetime(x, unit="ns")]
for converter in converters:
df = pd.DataFrame([{"x": converter(x), "y": x} for x in test_vals])
ddf = dd.from_pandas(df, npartitions=4)
assert ddf.npartitions > 1
ddf = ddf[ddf.y > df.y.max()].set_index("x")
expected_df = df[df.y > df.y.max()].set_index("x")
assert assert_eq(ddf, expected_df, **CHECK_FREQ)
assert ddf.npartitions == 1
def test_set_index_categorical():
# https://github.com/dask/dask/issues/5671
order = list(reversed(string.ascii_letters))
values = list(string.ascii_letters)
random.shuffle(values)
dtype = pd.api.types.CategoricalDtype(order, ordered=True)
df = pd.DataFrame({"A": pd.Categorical(values, dtype=dtype), "B": 1})
result = dd.from_pandas(df, npartitions=2).set_index("A")
assert len(result) == len(df)
# sorted with the metric defined by the Categorical
divisions = pd.Categorical(result.divisions, dtype=dtype)
assert_categorical_equal(divisions, divisions.sort_values())
def test_compute_divisions():
from dask.dataframe.shuffle import compute_and_set_divisions
df = pd.DataFrame(
{"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]},
index=[1, 3, 10, 20],
)
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = compute_and_set_divisions(copy(a))
assert_eq(a, b, check_divisions=False)
assert b.known_divisions
def test_empty_partitions():
# See https://github.com/dask/dask/issues/2408
df = pd.DataFrame({"a": list(range(10))})
df["b"] = df["a"] % 3
df["c"] = df["b"].astype(str)
ddf = dd.from_pandas(df, npartitions=3)
ddf = ddf.set_index("b")
ddf = ddf.repartition(npartitions=3)
ddf.get_partition(0).compute()
assert_eq(ddf, df.set_index("b"))
ddf = ddf.set_index("c")
assert_eq(ddf, df.set_index("b").set_index("c"))
def test_remove_nans():
tests = [
((1, 1, 2), (1, 1, 2)),
((None, 1, 2), (1, 1, 2)),
((1, None, 2), (1, 2, 2)),
((1, 2, None), (1, 2, 2)),
((1, 2, None, None), (1, 2, 2, 2)),
((None, None, 1, 2), (1, 1, 1, 2)),
((1, None, None, 2), (1, 2, 2, 2)),
((None, 1, None, 2, None, 3, None), (1, 1, 2, 2, 3, 3, 3)),
]
converters = [
(int, np.nan),
(float, np.nan),
(str, np.nan),
(lambda x: pd.to_datetime(x, unit="ns"), np.datetime64("NaT")),
]
for conv, none_val in converters:
for inputs, expected in tests:
params = [none_val if x is None else conv(x) for x in inputs]
expected = [conv(x) for x in expected]
assert remove_nans(params) == expected
@pytest.mark.slow
def test_gh_2730():
large = pd.DataFrame({"KEY": np.arange(0, 50000)})
small = pd.DataFrame({"KEY": np.arange(25, 500)})
dd_left = dd.from_pandas(small, npartitions=3)
dd_right = dd.from_pandas(large, npartitions=257)
with dask.config.set(shuffle="tasks", scheduler="sync"):
dd_merged = dd_left.merge(dd_right, how="inner", on="KEY")
result = dd_merged.compute()
expected = large.merge(small, how="inner", on="KEY")
tm.assert_frame_equal(result.sort_values("KEY").reset_index(drop=True), expected)
@pytest.mark.parametrize("npartitions", [None, "auto"])
def test_set_index_does_not_repeat_work_due_to_optimizations(npartitions):
# Atomic counter
count = itertools.count()
def increment():
next(count)
def make_part(dummy, n):
return pd.DataFrame({"x": np.random.random(n), "y": np.random.random(n)})
nbytes = 1e6
nparts = 50
n = int(nbytes / (nparts * 8))
dsk = {("inc", i): (increment,) for i in range(nparts)}
dsk.update({("x", i): (make_part, ("inc", i), n) for i in range(nparts)})
ddf = dd.DataFrame(dsk, "x", make_part(None, 1), [None] * (nparts + 1))
ddf.set_index("x", npartitions=npartitions)
ntimes = next(count)
assert ntimes == nparts
def test_set_index_errors_with_inplace_kwarg():
df = pd.DataFrame({"a": [9, 8, 7], "b": [6, 5, 4], "c": [3, 2, 1]})
ddf = dd.from_pandas(df, npartitions=1)
ddf.set_index("a")
with pytest.raises(NotImplementedError):
ddf.set_index("a", inplace=True)
def test_set_index_timestamp():
df = pd.DataFrame({"A": pd.date_range("2000", periods=12, tz="US/Central"), "B": 1})
ddf = dd.from_pandas(df, 2)
divisions = (
pd.Timestamp("2000-01-01 00:00:00-0600", tz="US/Central", freq="D"),
| pd.Timestamp("2000-01-12 00:00:00-0600", tz="US/Central", freq="D") | pandas.Timestamp |
import abc
import time, random
import pandas as pd
import os
import numpy as np
import benchutils as utils
import knowledgebases
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_selection import RFE, VarianceThreshold
from sklearn import preprocessing
class FeatureSelectorFactory():
"""Singleton class.
The singleton is implemented via a private inner class that Sphinx does not render, so have a look at the descriptions in the source code.
Creates feature selector object based on a given name.
New feature selection approaches must be registered here.
Names for feature selectors must follow a particular scheme, with keywords separated by _:
- first keyword is the actual selector name
- if needed, second keyword is the knowledge base
- if needed, third keyword is the (traditional) approach to be combined
Examples:
- Traditional Approaches have only one keyword, e.g. InfoGain or ANOVA
- LassoPenalty_KEGG provides KEGG information to the LassoPenalty feature selection approach
- Weighted_KEGG_InfoGain --> Factory creates an instance of KBweightedSelector which uses KEGG as knowledge base and InfoGain as traditional selector.
While the focus here lies on the combination of traditional approaches with prior biological knowledge, it is theoretically possible to use ANY selector object for combination that inherits from :class:`FeatureSelector`.
:param config: configuration parameters for UMLS web service as specified in config file.
:type config: dict
"""
class __FeatureSelectorFactory():
def createFeatureSelector(self, name):
"""Create selector from a given name.
Separates creation process into (traditional) approaches (only one keyword), approaches requiring a knowledge base, and approaches requiring both a knowledge base and another selector, e.g. a traditional one.
:param name: selector name following the naming conventions: first keyword is the actual selector name, second keyword is the knowledge base, third keyword another selector to combine. Keywords must be separated by "_". Example: Weighted_KEGG_InfoGain
:type name: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
parts = name.split("_")
if len(parts) == 1:
return self.createTraditionalSelector(name)
elif len(parts) == 2:
return self.createIntegrativeSelector(parts[0], parts[1])
elif len(parts) == 3:
return self.createCombinedSelector(parts[0], parts[1], parts[2])
utils.logError("ERROR: The provided selector name does not correspond to the expected format. "
"A selector name should consist of one or more keywords separated by _. "
"The first keyword is the actual approach (e.g. weighted, or a traditional approach), "
"the second keyword corresponds to a knowledge base to use (e.g. KEGG), "
"the third keyword corresponds to a traditional selector to use (e.g. when using a modifying or combining approach).")
exit()
def createTraditionalSelector(self, selectorName):
"""Creates a (traditional) selector (without a knowledge base) from a given name.
Register new implementations of a (traditional) selector here.
Stops processing if the selector could not be found.
:param selectorName: selector name
:type selectorName: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
if selectorName == "Random":
return RandomSelector()
if selectorName == "VB-FS":
return VarianceSelector()
if selectorName == "Variance":
return Variance2Selector()
if selectorName == "ANOVA":
return AnovaSelector()
if selectorName == "mRMR":
return MRMRSelector()
if selectorName == "SVMpRFE":
return SVMRFESelector()
# RUN WEKA FEATURE SELECTION AS SELECTED
if selectorName == "InfoGain":
return InfoGainSelector()
if selectorName == "ReliefF":
return ReliefFSelector()
#if "-RFE" in selectorName or "-SFS" in selectorName: -- SFS is currently disabled because sometimes the coef_ param is missing and error is thrown
if "-RFE" in selectorName:
return WrapperSelector(selectorName)
if selectorName == "Lasso":
return LassoSelector()
if selectorName == "RandomForest":
return RandomForestSelector()
utils.logError("ERROR: The listed selector " + selectorName + " is not available. See the documentation for available selectors. Stop execution.")
exit()
def createIntegrativeSelector(self, selectorName, kb):
"""Creates a feature selector using a knowledge base from the given selector and knowledge base names.
Register new implementations of a prior knowledge selector here that does not requires a (traditional) selector.
Stops processing if the selector could not be found.
:param selectorName: selector name
:type selectorName: str
:param kb: knowledge base name
:type kb: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
kbfactory = knowledgebases.KnowledgeBaseFactory()
knowledgebase = kbfactory.createKnowledgeBase(kb)
if selectorName == "NetworkActivity":
featuremapper = PathwayActivityMapper()
return NetworkActivitySelector(knowledgebase, featuremapper)
if selectorName == "CorgsNetworkActivity":
featuremapper = CORGSActivityMapper()
return NetworkActivitySelector(knowledgebase, featuremapper)
if selectorName == "LassoPenalty":
return LassoPenalty(knowledgebase)
if selectorName == "KBonly":
return KbSelector(knowledgebase)
utils.logError("ERROR: The listed selector " + selectorName + " is not available. See the documentation for available selectors. Stop execution.")
exit()
def createCombinedSelector(self, selectorName, trad, kb):
"""Creates a feature selector that combines a knowledge base and another feature selector based on the given names.
Register new implementations of a prior knowledge selector that requires another selector here.
Stops processing if the selector could not be found.
:param selectorName: selector name
:type selectorName: str
:param trad: name of the (traditional) feature selector.
:type trad: str
:param kb: knowledge base name
:type kb: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
tradSelector = self.createTraditionalSelector(trad)
kbfactory = knowledgebases.KnowledgeBaseFactory()
knowledgebase = kbfactory.createKnowledgeBase(kb)
if selectorName == "Postfilter":
return PostFilterSelector(knowledgebase, tradSelector)
if selectorName == "Prefilter":
return PreFilterSelector(knowledgebase, tradSelector)
if selectorName == "Extension":
return ExtensionSelector(knowledgebase, tradSelector)
if selectorName == "Weighted":
return KBweightedSelector(knowledgebase, tradSelector)
utils.logError("ERROR: The listed selector " + selectorName + " is not available. See the documentation for available selectors. Stop execution.")
exit()
instance = None
def __init__(self):
if not FeatureSelectorFactory.instance:
FeatureSelectorFactory.instance = FeatureSelectorFactory.__FeatureSelectorFactory()
def __getattr__(self, name):
return getattr(self.instance, name)
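# Usage sketch for the factory (hypothetical; the selector name follows the naming scheme
# documented in the class docstring above, and the file paths are placeholders):
#   factory = FeatureSelectorFactory()
#   selector = factory.createFeatureSelector("Weighted_KEGG_InfoGain")
#   selector.setParams("/data/dataset.csv", "/results/", "/logs/")
#   ranking_file = selector.selectFeatures()   # path to the written ranking file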
class FeatureSelector:
"""Abstract super class for feature selection functionality.
Every feature selection class has to inherit from this class and implement its :meth:`FeatureSelector.selectFeatures` method and - if necessary - its :meth:`FeatureSelector.setParams` method.
Once created, feature selection can be triggered by first setting parameters (input, output, etc) as needed with :meth:`FeatureSelector.setParams`.
The actual feature selection is triggered by invoking :meth:`FeatureSelector.selectFeatures`.
:param input: absolute path to input dataset.
:type input: str
:param output: absolute path to output directory (where the ranking will be stored).
:type output: str
:param dataset: the dataset for which to select features. Will be loaded dynamically based on self.input at first usage.
:type dataset: :class:`pandas.DataFrame`
:param dataConfig: config parameters for input data set.
:type dataConfig: dict
:param name: selector name
:type name: str
"""
def __init__(self, name):
self.input = None
self.output = None
self.dataset = None
self.loggingDir = None
self.dataConfig = utils.getConfig("Dataset")
self.setTimeLogs(utils.createTimeLog())
self.enableLogFlush()
self.name = name
super().__init__()
@abc.abstractmethod
def selectFeatures(self):
"""Abstract.
Invoke feature selection functionality in this method when implementing a new selector
:return: absolute path to the output ranking file.
:rtype: str
"""
pass
def getTimeLogs(self):
"""Gets all logs for this selector.
:return: dataframe of logged events containing start/end time, duration, and a short description.
:rtype: :class:`pandas.DataFrame`
"""
return self.timeLogs
def setTimeLogs(self, newTimeLogs):
"""Overwrites the current logs with new ones.
:param newTimeLogs: new dataframe of logged events containing start/end time, duration, and a short description.
:type newTimeLogs: :class:`pandas.DataFrame`
"""
self.timeLogs = newTimeLogs
def disableLogFlush(self):
"""Disables log flushing (i.e., writing the log to a separate file) of the selector at the end of feature selection.
This is needed when a :class:`CombiningSelector` uses a second selector and must prevent that selector's log messages from being written, as they could otherwise overwrite logs from another selector of the same name.
"""
self.enableLogFlush = False
def enableLogFlush(self):
"""Enables log flushing, i.e. writing the logs to a separate file at the end of feature selection.
"""
self.enableLogFlush = True
def getName(self):
"""Gets the selector's name.
:return: selector name.
:rtype: str
"""
return self.name
def getData(self):
"""Gets the labeled dataset from which to select features.
:return: dataframe containing the dataset with class labels.
:rtype: :class:`pandas.DataFrame`
"""
if self.dataset is None:
self.dataset = pd.read_csv(self.input, index_col=0)
return self.dataset
def getUnlabeledData(self):
"""Gets the dataset without labels.
:return: dataframe containing the dataset without class labels.
:rtype: :class:`pandas.DataFrame`
"""
dataset = self.getData()
return dataset.loc[:, dataset.columns != "classLabel"]
def getFeatures(self):
"""Gets features from the dataset.
:return: list of features.
:rtype: list of str
"""
return self.getData().columns[1:]
def getUniqueLabels(self):
"""Gets the unique class labels available in the dataset.
:return: list of distinct class labels.
:rtype: list of str
"""
return list(set(self.getLabels()))
def getLabels(self):
"""Gets the labels in the data set.
:return: all labels from the dataset.
:rtype: list of str
"""
return list(self.getData()["classLabel"])
def setParams(self, inputPath, outputDir, loggingDir):
"""Sets parameters for the feature selection run: path to the input datast and path to the output directory.
:param inputPath: absolute path to the input file containing the dataset for analysis.
:type inputPath: str
:param outputDir: absolute path to the output directory (where to store the ranking)
:type outputDir: str
:param loggingDir: absolute path to the logging directory (where to store log files)
:type loggingDir: str
"""
self.input = inputPath
self.output = outputDir
self.loggingDir = loggingDir
def writeRankingToFile(self, ranking, outputFile, index = False):
"""Writes a given ranking to a specified file.
:param ranking: dataframe with the ranking.
:type ranking: :class:`pandas.DataFrame`
:param outputFile: absolute path of the file where ranking will be stored.
:type outputFile: str
:param index: whether to write the dataframe's index or not.
:type index: bool, default False
"""
if not ranking.empty:
ranking.to_csv(outputFile, index = index, sep = "\t")
else:
#make sure to write at least the header if the dataframe is empty
with open(outputFile, 'w') as outfile:
header_line = "\"attributeName\"\t\"score\"\n"
outfile.write(header_line)
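# For reference, the ranking files written by this class are tab-separated with the layout
# below (the feature names and scores shown are illustrative only):
#   attributeName    score
#   GeneA            12.7
#   GeneB            9.3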
class PythonSelector(FeatureSelector):
"""Abstract.
Inherit from this class when implementing a feature selector using any of scikit-learn's functionality.
Since selector invocation, input preprocessing, and output postprocessing are typically very similar (or identical) for such implementations, this class already encapsulates them.
Instead of implementing :meth:`PythonSelector.selectFeatures`, implement :meth:`PythonSelector.runSelector`.
"""
def __init__(self, name):
super().__init__(name)
@abc.abstractmethod
def runSelector(self, data, labels):
"""Abstract - implement this method when inheriting from this class.
Runs the actual feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
pass
def selectFeatures(self):
"""Executes the feature selection procedure.
Prepares the input data set to match scikit-learn's expected formats and postprocesses the output to create a ranking.
:return: absolute path to the output ranking file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.getName() + ".csv"
outputFile = self.output + filename
data, labels = self.prepareInput()
selector = self.runSelector(data, labels)
self.prepareOutput(outputFile, data, selector)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outputFile
def prepareInput(self):
"""Prepares the input data set before running any of scikit-learn's selectors.
Removes the labels from the input data set and encodes the labels in numbers.
:return: dataset (without labels) and labels encoded in numbers.
:rtype: :class:`pandas.DataFrame` and list of int
"""
start = time.time()
labels = self.getLabels()
data = self.getUnlabeledData()
le = preprocessing.LabelEncoder()
numeric_labels = le.fit_transform(labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Input Preparation")
return data, numeric_labels
def prepareOutput(self, outputFile, data, selector):
"""Transforms the selector output to a valid ranking and stores it into the specified file.
:param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset.
:type data: :class:`pandas.DataFrame`
:param selector: selector object from scikit-learn.
"""
start = time.time()
ranking = pd.DataFrame()
ranking["attributeName"] = data.columns
ranking["score"] = selector.scores_
ranking = ranking.sort_values(by='score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Output Preparation")
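# Minimal sketch of a PythonSelector subclass (hypothetical, not part of this benchmark; it only
# illustrates the pattern described in the PythonSelector docstring above). prepareOutput() reads
# selector.scores_, so any fitted scikit-learn selector exposing scores_ fits this pattern:
#   class Chi2Selector(PythonSelector):
#       def __init__(self):
#           super().__init__("Chi2")
#       def runSelector(self, data, labels):
#           from sklearn.feature_selection import SelectKBest, chi2
#           selector = SelectKBest(chi2, k="all")   # chi2 assumes non-negative feature values
#           return selector.fit(data, labels)       # fit() returns the fitted selector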
class RSelector(FeatureSelector,metaclass=abc.ABCMeta):
"""Selector class for invoking R code for feature selection.
Inherit from this class if you want to use R code, implement :meth:`RSelector.createParams` with what your script requires, and set self.scriptName accordingly.
:param rConfig: config parameters to execute R code.
:type rConfig: dict
"""
def __init__(self, name):
self.rConfig = utils.getConfig("R")
self.scriptName = "FS_" + name + ".R"
super().__init__(name)
@abc.abstractmethod
def createParams(self, filename):
"""Abstract.
Implement this method to set the parameters your R script requires.
:param filename: absolute path of the output file.
:type filename: str
:return: list of parameters to use for R code execution, e.g. input and output filenames.
:rtype: list of str
"""
pass
def selectFeatures(self):
"""Triggers the feature selection.
Actually a wrapper method that invokes external R code.
:return: absolute path to the result file containing the ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.getName() + ".csv"
outputFile = self.output + filename
params = self.createParams(outputFile)
utils.runRCommand(self.rConfig, self.scriptName , params)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outputFile
class JavaSelector(FeatureSelector):
"""Selector class for invoking R code for feature selection.
Inherit from this class if you want to use R code, implement :meth:`RSelector.createParams` with what your script requires, and set self.scriptName accordingly.
:param javaConfig: config parameters to execute java code.
:type javaConfig: dict
"""
def __init__(self, name):
self.javaConfig = utils.getConfig("Java")
super().__init__(name)
@abc.abstractmethod
def createParams(self):
"""Abstract.
Implement this method to set the parameters your java code requires.
:return: list of parameters to use for java code execution, e.g. input and output filenames.
:rtype: list of str
"""
pass
def selectFeatures(self):
"""Triggers the feature selection.
Actually a wrapper method that invokes external java code.
:return: absolute path to the result file containing the ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.name + ".csv"
params = self.createParams()
utils.runJavaCommand(self.javaConfig, "/WEKA_FeatureSelector.jar", params)
output_filepath = self.output + filename
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return output_filepath
############################### PRIOR KNOWLEDGE SELECTORS ###############################
class PriorKnowledgeSelector(FeatureSelector,metaclass=abc.ABCMeta):
"""Super class for all prior knowledge approaches.
If you want to implement an own prior knowledge approach that uses a knowledge base (but not a second selector and no network approaches), inherit from this class.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase` or inheriting class
:param alternativeSearchTerms: list of alternative search terms to use for querying the knowledge base.
:type alternativeSearchTerms: list of str
"""
def __init__(self, name, knowledgebase):
self.knowledgebase = knowledgebase
super().__init__(name)
self.alternativeSearchTerms = self.collectAlternativeSearchTerms()
@abc.abstractmethod
def selectFeatures(self):
"""Abstract.
Implement this method when inheriting from this class.
:return: absolute path to the output ranking file.
:rtype: str
"""
pass
def collectAlternativeSearchTerms(self):
"""Gets all alternative search terms that were specified in the config file and put them into a list.
:return: list of alternative search terms to use for querying the knowledge base.
:rtype: list of str
"""
alternativeTerms = self.dataConfig["alternativeSearchTerms"].split(" ")
searchTerms = []
for term in alternativeTerms:
searchTerms.append(term.replace("_", " "))
return searchTerms
def getSearchTerms(self):
"""Gets all search terms to use for querying a knowledge base.
Search terms that will be used are a) the class labels in the dataset, and b) the alternative search terms that were specified in the config file.
:return: list of search terms to use for querying the knowledge base.
:rtype: list of str
"""
searchTerms = list(self.getUniqueLabels())
searchTerms.extend(self.alternativeSearchTerms)
return searchTerms
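# Hedged example (hypothetical config values): with class labels ["luminal", "basal"]
# and alternativeSearchTerms = "breast_cancer mammary_carcinoma" in the config,
# getSearchTerms() returns ["luminal", "basal", "breast cancer", "mammary carcinoma"],
# because collectAlternativeSearchTerms() replaces underscores by spaces.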
def getName(self):
"""Returns the full name (including applied knowledge base) of this selector.
:return: selector name.
:rtype: str
"""
return self.name + "_" + self.knowledgebase.getName()
#super class for integrative approaches that combine a knowledge base with another selector
class CombiningSelector(PriorKnowledgeSelector):
"""Super class for prior knoweldge approaches that use a knowledge base AND combine it with any kind of selector, e.g. a traditional approach.
Inherit from this class if you want to implement a feature selector that requires both a knowledge base and another selector, e.g. because it combines information from both.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase` or inheriting class
:param tradApproach: any feature selector implementation to use internally, e.g. a traditional approach like ANOVA
:type tradApproach: :class:`FeatureSelector`
"""
def __init__(self, name, knowledgebase, tradApproach):
self.tradSelector = tradApproach
self.tradSelector.disableLogFlush()
super().__init__(name, knowledgebase)
self.tradSelector.setTimeLogs(self.timeLogs)
@abc.abstractmethod
def selectFeatures(self):
"""Abstract.
Implement this method as desired when inheriting from this class.
:return: absolute path to the output ranking file.
:rtype: str
"""
pass
def getName(self):
"""Returns the full name (including applied knowledge base and feature selector) of this selector.
:returns: selector name.
:rtype: str
"""
return self.name + "_" + self.tradSelector.getName() + "_" + self.knowledgebase.getName()
def getExternalGenes(self):
"""Gets all genes related to the provided search terms from the knowledge base.
:returns: list of gene names.
:rtype: list of str
"""
start = time.time()
externalGenes = self.knowledgebase.getRelevantGenes(self.getSearchTerms())
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Getting External Genes")
return externalGenes
class NetworkSelector(PriorKnowledgeSelector):
"""Abstract.
Inherit from this class if you want to implement a new network approach that actually conducts feature EXTRACTION, i.e. maps the original data set to a new feature space of pathways/subnetworks.
Instead of :meth:`FeatureSelector.selectFeatures` implement :meth:`NetworkSelector.selectPathways` when inheriting from this class.
Instances of :class:`NetworkSelector` and inheriting classes also require a :class:`PathwayMapper` object that transfers the dataset to the new feature space.
Custom implementations thus need to implement a) a selection strategy to select pathways and b) a mapping strategy to compute new feature values for the selected pathways.
:param featureMapper: feature mapping object that transfers the feature space.
:type featureMapper: :class:`FeatureMapper` or inheriting class
"""
def __init__(self, name, knowledgebase, featuremapper):
self.featureMapper = featuremapper
super().__init__(name, knowledgebase)
@abc.abstractmethod
def selectPathways(self, pathways):
"""Selects the pathways that will become the new features of the data set.
Implement this method (instead of :meth:`FeatureSelector.selectFeatures`) when inheriting from this class.
:param pathways: dict of pathways (pathway names as keys) to select from.
:type pathways: dict
:returns: pathway ranking as dataframe
:rtype: :class:`pandas.DataFrame`
"""
pass
def writeMappedFile(self, mapped_data, fileprefix):
"""Writes the mapped dataset with new feature values to the same directory as the original file is located (it will be automatically processed then).
:param mapped_data: dataframe containing the dataset with mapped feature space.
:type mapped_data: :class:`pandas.DataFrame`
:param fileprefix: prefix of the file name, e.g. the directory path
:type fileprefix: str
:return: absolute path of the file name to store the mapped data set.
:rtype: str
"""
mapped_filepath = fileprefix + "_" + self.getName() + ".csv"
mapped_data.to_csv(mapped_filepath)
return mapped_filepath
def getName(self):
"""Gets the selector name (including the knowledge base).
:returns: selector name.
:rtype: str
"""
return self.name + "_" + self.knowledgebase.getName()
def filterPathways(self, pathways):
filtered_pathways = {}
for pathwayName in pathways:
genes = pathways[pathwayName].nodes_by_label.keys()
#check if there is an overlap between the pathway and data set genes
existingGenes = list(set(self.getFeatures()) & set(genes))
if len(existingGenes) > 0:
filtered_pathways[pathwayName] = pathways[pathwayName]
else:
utils.logWarning("WARNING: No genes of pathway " + pathwayName + " found in dataset. Pathway will not be considered")
return filtered_pathways
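# Hedged illustration of the overlap check above (hypothetical names): with dataset
# features {"TP53", "EGFR"} and a pathway whose nodes are {"EGFR", "KRAS"}, the
# intersection {"EGFR"} is non-empty and the pathway is kept; a pathway containing
# only {"BRCA2"} would be dropped with a warning.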
def selectFeatures(self):
"""Instead of selecting existing features, instances of :class:`NetworkSelector` select pathways or submodules as features.
For that, it first queries its knowledge base for pathways.
It then selects the top k pathways (strategy to be implemented in :meth:`NetworkSelector.selectPathways`) and subsequently maps the dataset to its new feature space.
The mapping will be conducted by an object of :class:`PathwayMapper` or inheriting classes.
If a second dataset for cross-validation is available, the feature space of this dataset will also be transformed.
:returns: absolute path to the pathway ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
overallstart = time.time()
pathways = self.knowledgebase.getRelevantPathways(self.getSearchTerms())
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, overallstart, end, "Get Pathways")
#filter pathways to only those that contain at least one gene from the data set
pathways = self.filterPathways(pathways)
start = time.time()
pathwayRanking = self.selectPathways(pathways)
outputFile = self.output + self.getName() + ".csv"
self.writeRankingToFile(pathwayRanking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Pathway Selection")
pathwayNames = pathwayRanking["attributeName"]
start = time.time()
mapped_data = self.featureMapper.mapFeatures(self.getData(), pathways)
fileprefix = os.path.splitext(self.input)[0]
mapped_filepath = self.writeMappedFile(mapped_data, fileprefix)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Mapping")
#if crossvalidation is enabled, we also have to map the crossvalidation file
if (utils.getConfigBoolean("Evaluation", "enableCrossEvaluation")):
start = time.time()
#we need to get the cross validation file that had been moved into the intermediate folder
crossValidationPath = utils.getConfigValue("General", "crossVal_preprocessing") + "ready/"
crossValidationFile = utils.getConfigValue("Evaluation", "crossEvaluationData")
crossValFilename = os.path.basename(crossValidationFile)
crossValFilepath = crossValidationPath + crossValFilename
crossValData = pd.read_csv(crossValFilepath, index_col=0)
mapped_crossValData = self.featureMapper.mapFeatures(crossValData, pathways)
crossvalFileprefix = os.path.splitext(crossValFilepath)[0]
crossval_mapped_filepath = self.writeMappedFile(mapped_crossValData, crossvalFileprefix)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "CrossValidation Feature Mapping")
overallend = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, overallstart, overallend, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outputFile
############################### FILTER ###############################
class RandomSelector(FeatureSelector):
"""Baseline Selector: Randomly selects any features.
"""
def __init__(self):
super().__init__("Random")
def selectFeatures(self):
"""Randomly select any features from the feature space.
Assigns a score of 0.0 to every feature
:returns: absolute path to the ranking file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.getName() + ".csv"
outFilename = self.output + filename
#randomly pick any features
with open(self.input, 'r') as infile:
header = infile.readline().rstrip().split(",")
max_index = len(header)
min_index = 2
shuffled_indices = random.sample(range(min_index, max_index), max_index - 2)
with open(outFilename, 'w') as outfile:
header_line = "\"attributeName\"\t\"score\"\n"
outfile.write(header_line)
for i in shuffled_indices:
line = "\"" + header[i] + "\"\t\"0.0000\"\n"
outfile.write(line)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outFilename
class AnovaSelector(PythonSelector):
"""Runs ANOVA feature selection using scikit-learn implementation
"""
def __init__(self):
super().__init__("ANOVA")
def runSelector(self, data, labels):
"""Runs the ANOVA feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
start = time.time()
#setting k to "all" returns all features
selector = SelectKBest(f_classif, k="all")
selector.fit_transform(data, labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "ANOVA")
return selector
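# Minimal, self-contained sketch of the scikit-learn call used above (toy data,
# not the framework's input format); the scores_ attribute read by prepareOutput
# holds one ANOVA F-value per feature:
#   import numpy as np
#   from sklearn.feature_selection import SelectKBest, f_classif
#   X = np.array([[1.0, 10.0], [1.1, 20.0], [5.0, 11.0], [5.2, 19.0]])
#   y = [0, 0, 1, 1]
#   sel = SelectKBest(f_classif, k="all").fit(X, y)
#   sel.scores_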
class Variance2Selector(PythonSelector):
"""Runs variance-based feature selection using scikit-learn.
"""
def __init__(self):
super().__init__("Variance")
def prepareOutput(self, outputFile, data, selector):
"""Transforms the selector output to a valid ranking and stores it into the specified file.
We need to override this method because the variance selector has no scores_ attribute but provides variances_ instead.
:param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset.
:type data: :class:`pandas.DataFrame`
:param selector: selector object from scikit-learn.
"""
start = time.time()
ranking = pd.DataFrame()
ranking["attributeName"] = data.columns
ranking["score"] = selector.variances_
ranking = ranking.sort_values(by='score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Output Preparation")
def runSelector(self, data, labels):
"""Runs the actual variance-based feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
start = time.time()
selector = VarianceThreshold()
selector.fit_transform(data)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Variance_p")
return selector
class MRMRSelector(RSelector):
"""Runs maximum Relevance minimum Redundancy (mRMR) feature selection using the mRMRe R implementation: https://cran.r-project.org/web/packages/mRMRe/index.html
Actually a wrapper class for invoking the R code.
:param scriptName: name of the R script to invoke.
:type scriptName: str
:param maxFeatures: maximum number of features to select. Currently all features are ranked (a value of 0 means no limit).
:type maxFeatures: int
"""
def __init__(self):
self.maxFeatures = 0
super().__init__("mRMR")
def createParams(self, outputFile):
"""Sets the parameters the R script requires (input file, output file, maximum number of features).
:return: list of parameters to use for mRMR execution in R.
:rtype: list of str
"""
params = [self.input, outputFile, str(self.maxFeatures)]
return params
class VarianceSelector(RSelector):
"""Runs variance-based feature selection using R genefilter library.
Actually a wrapper class for invoking the R code.
:param scriptName: name of the R script to invoke.
:type scriptName: str
"""
def __init__(self):
super().__init__("VB-FS")
def createParams(self, outputFile):
"""Sets the parameters the R script requires (input file, output file).
:param outputFile: absolute path to the output file that will contain the ranking.
:type outputFile: str
:return: list of parameters to use for variance-based feature selection execution in R.
:rtype: list of str
"""
params = [self.input, outputFile]
return params
class InfoGainSelector(JavaSelector):
"""Runs InfoGain feature selection as provided by WEKA: https://www.cs.waikato.ac.nz/ml/weka/
Actually a wrapper class for invoking java code.
"""
def __init__(self):
super().__init__("InfoGain")
def createParams(self):
"""Sets the parameters the java program requires (input file, output file, selector name).
:return: list of parameters to use for InfoGain execution in java.
:rtype: list of str
"""
params = [self.input, self.output, "InfoGain"]
return params
class ReliefFSelector(JavaSelector):
"""Runs ReliefF feature selection as provided by WEKA: https://www.cs.waikato.ac.nz/ml/weka/
Actually a wrapper class for invoking java code.
"""
def __init__(self):
super().__init__("ReliefF")
def createParams(self):
"""Sets the parameters the java program requires (input file, output file, selector name).
:return: list of parameters to use for ReliefF execution in java.
:rtype: list of str
"""
params = [self.input, self.output, "ReliefF"]
return params
############################### FILTER - COMBINED ###############################
class KbSelector(PriorKnowledgeSelector):
"""Knowledge base selector.
Selects features exclusively based on the information retrieved from a knowledge base.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase`
"""
def __init__(self, knowledgebase):
super().__init__("KBonly", knowledgebase)
def updateScores(self, entry, newGeneScores):
"""Updates a score entry with the new score retrieved from the knowledge base.
Used by apply function.
:param entry: a gene score entry consisting of the gene name and its score
:type entry: :class:`pandas.Series`
:param newGeneScores: dataframe containing gene scores retrieved from the knowledge base.
:type newGeneScores: :class:`pandas.DataFrame`
:returns: updated series element.
:rtype: :class:`pandas.Series`
"""
gene = entry["attributeName"]
updatedGenes = newGeneScores.iloc[:,0]
#if the gene has a new score, update the entry
if gene in updatedGenes.values:
x = newGeneScores.loc[(newGeneScores["gene_symbol"] == gene), "score"]
#necessary because we want to get the scalar value, not a series
entry["score"] = x.iloc[0]
return entry
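# Hedged illustration of the row-wise update (hypothetical genes and scores), where
# selector is a KbSelector instance:
#   ranking = pd.DataFrame({"attributeName": ["TP53", "EGFR"], "score": [0.00001, 0.00001]})
#   newGeneScores = pd.DataFrame({"gene_symbol": ["TP53"], "score": [0.83]})
#   updated = ranking.apply(selector.updateScores, axis=1, newGeneScores=newGeneScores)
#   # TP53 now carries 0.83, EGFR keeps the minimal default score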
def selectFeatures(self):
"""Does the actual feature selection.
Retrieves association scores for genes from the knowledge base based on the given search terms.
:returns: absolute path to the resulting ranking file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
outputFile = self.output + self.getName() + ".csv"
genes = self.getFeatures()
# assign a minimal default score (0.00001) to all genes
attributeNames = genes
scores = [0.00001] * len(genes)
ranking = pd.DataFrame({"attributeName": attributeNames, "score": scores})
kb_start = time.time()
associatedGenes = self.knowledgebase.getGeneScores(self.getSearchTerms())
kb_end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, kb_start, kb_end, "Getting External Gene Scores")
# assign association score to all genes in data
updated_ranking = ranking.apply(self.updateScores, axis = 1, newGeneScores = associatedGenes)
#sort by score, with highest on top
updated_ranking = updated_ranking.sort_values("score", ascending=False)
#save final rankings to file
self.writeRankingToFile(updated_ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished. ########################")
return outputFile
class KBweightedSelector(CombiningSelector):
"""Selects features based on association scores retrieved from the knowledge base and the relevance score retrieved by the (traditional) approach.
Computes the final score via tradScore * assocScore.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase` or inheriting class
:param tradApproach: any feature selector implementation to use internally, e.g. a traditional approach like ANOVA
:type tradApproach: :class:`FeatureSelector`
"""
def __init__(self, knowledgebase, tradApproach):
super().__init__("Weighted", knowledgebase, tradApproach)
def updateScores(self, entry, newGeneScores):
"""Updates a score entry with the new score retrieved from the knowledge base.
Used by apply function.
:param entry: a gene score entry consisting of the gene name and its score
:type entry: :class:`pandas.Series`
:param newGeneScores: dataframe containing gene scores retrieved from the knowledge base.
:type newGeneScores: :class:`pandas.DataFrame`
:returns: updated series element.
:rtype: :class:`pandas.Series`
"""
gene = entry["attributeName"]
updatedGenes = newGeneScores.iloc[:,0]
#if the gene has a new score, update the entry
if gene in updatedGenes.values:
x = newGeneScores.loc[(newGeneScores["gene_symbol"] == gene), "score"]
#necessary because we want to get the scalar value, not a series
entry["score"] = x.iloc[0]
return entry
def getName(self):
"""Gets the selector name (including the knowledge base and (traditional) selector).
:returns: selector name.
:rtype: str
"""
return self.name + "_" + self.tradSelector.getName() + "_" + self.knowledgebase.getName()
def computeStatisticalRankings(self, intermediateDir):
"""Computes the statistical relevance score of all features using the (traditional) selector.
:param intermediateDir: absolute path to output directory for (traditional) selector (where to write the statistical rankings).
:type intermediateDir: str
:returns: dataframe with statistical ranking.
:rtype: :class:`pandas.DataFrame`
"""
start = time.time()
self.tradSelector.setParams(self.input, intermediateDir, self.loggingDir)
statsRankings = self.tradSelector.selectFeatures()
#load data frame from file
statisticalRankings = pd.read_csv(statsRankings, index_col = 0, sep = "\t", engine = "python")
self.timeLogs = pd.concat([self.timeLogs, self.tradSelector.getTimeLogs()])
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Statistical Ranking")
return statisticalRankings
def computeExternalRankings(self):
"""Computes the association scores for every gene using the knowledge base.
Genes for which no entry could be found receive a default score of 0.000001.
:return: dict of gene scores retrieved from the knowledge base (gene names as keys).
:rtype: dict
"""
start = time.time()
genes = self.getFeatures()
# assign a minimal default score (0.000001) to all genes
geneScores = dict.fromkeys(genes, 0.000001)
associatedGenes = self.knowledgebase.getGeneScores(self.getSearchTerms())
# assign association score to all genes in data
for gene in geneScores.keys():
# check if score for gene was found in knowledge base
if gene in list(associatedGenes.iloc[:, 0]):
gene_entry = associatedGenes[associatedGenes["gene_symbol"] == gene]
geneScores[gene] = gene_entry.iloc[0, 1]
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "External Ranking")
return geneScores
def combineRankings(self, externalRankings, statisticalRankings):
"""Combines score rankings from both the knowledge base and the (traditional) selector (kb_score * trad_score) to retrieve a final score for every gene.
:param externalRankings: dataframe with ranking from knowledge base.
:type externalRankings: :class:`pandas.DataFrame`
:param statisticalRankings: dataframe with statistical ranking.
:type statisticalRankings: :class:`pandas.DataFrame`
:returns: dataframe with final combined ranking.
:rtype: :class:`pandas.DataFrame`
"""
start = time.time()
#just take over the statistical rankings and alter the scores accordingly
combinedRankings = statisticalRankings.copy()
features = statisticalRankings.index
#go trough every item and combine by weighting
for feature in features:
#update scores - external rankings only provide feature scores, no indices
if feature in externalRankings.keys():
externalScore = externalRankings[feature]
else:
#if no entry exists, set the score to a minimal value so it does not zero out the combined score
externalScore = 0.00001
if externalScore == 0:
# the knowledge base may return a score of exactly 0; raise it to the same minimum for the same reason
externalScore = 0.00001
statsScore = statisticalRankings.at[feature, "score"]
combinedRankings.at[feature, "score"] = externalScore * statsScore
#reorder genes based on new score
combinedRankings = combinedRankings.sort_values('score', ascending=False)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Ranking Combination")
return combinedRankings
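# Worked (hypothetical) example of the weighting above: if the traditional selector
# assigned a gene a statistical score of 12.5 and the knowledge base an association
# score of 0.8, the combined score becomes 12.5 * 0.8 = 10.0; a gene unknown to the
# knowledge base is multiplied by the minimal default 0.00001 instead, so it sinks
# to the bottom of the ranking without being removed.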
def selectFeatures(self):
"""Runs the feature selection process.
Retrieves scores from knowledge base and (traditional) selector and combines these to a single score.
:returns: absolute path to final output file containing the ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
intermediateDir = utils.getConfigValue("General",
"intermediateDir") + self.getName() + "/"
utils.createDirectory(intermediateDir)
outputFile = self.output + self.getName() + ".csv"
#compute gene rankings with traditional approaches
statisticalRankings = self.computeStatisticalRankings(intermediateDir)
#compute gene rankings/associations with external knowledge base
externalRankings = self.computeExternalRankings()
#combine ranking scores
combinedRankings = self.combineRankings(externalRankings, statisticalRankings)
#save final rankings to file
#note: here the gene ids are the index, so write it to file
self.writeRankingToFile(combinedRankings, outputFile, True)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished. ########################")
return outputFile
class LassoPenalty(PriorKnowledgeSelector, RSelector):
"""Runs feature selection by invoking xtune R package: https://cran.r-project.org/web/packages/xtune/index.html
xtune is a Lasso selector that uses feature-individual penalty scores.
These penalty scores are retrieved from the knowledge base.
"""
selectFeatures = RSelector.selectFeatures #make sure the right selectFeatures method will be invoked
getName = PriorKnowledgeSelector.getName
def __init__(self, knowledgebase):
super().__init__("LassoPenalty", knowledgebase)
self.scriptName = "FS_LassoPenalty.R"
def createParams(self, outputFile):
"""Sets the parameters the xtune R script requires (input file, output file, filename containing rankings from knowledge base).
:return: list of parameters to use for xtune execution in R.
:rtype: list of str
"""
externalScore_filename = self.computeExternalRankings()
params = [self.input, outputFile, externalScore_filename]
return params
def computeExternalRankings(self):
"""Computes the association scores for each feature based on the scores retrieved from the knowledge base.
Features that could not be found in the knowledge base receive a default score of 0.000001.
:return: absolute path to the file containing the external rankings.
:rtype: str
"""
start = time.time()
intermediateOutput = utils.getConfigValue("General",
"intermediateDir") + self.getName() + "/"
utils.createDirectory(intermediateOutput)
genes = self.getFeatures()
# assign a minimal default score (0.000001) to all genes
geneScores = dict.fromkeys(genes, 0.000001)
associatedGenes = self.knowledgebase.getGeneScores(self.getSearchTerms())
#assign association score to all genes in data
for gene in geneScores.keys():
#check if score for gene was found in knowledge base
if gene in list(associatedGenes.iloc[:,0]):
gene_entry = associatedGenes[associatedGenes["gene_symbol"] == gene]
geneScores[gene] = gene_entry.iloc[0,1]
#write gene scores to file
scores_filename = intermediateOutput + self.knowledgebase.getName() + "_scores.csv"
scores_df = pd.DataFrame.from_dict(geneScores, orient = "index", columns = ["score"])
scores_df = scores_df.sort_values('score', ascending=False)
scores_df.to_csv(scores_filename, index=True)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "External Ranking")
return scores_filename
############################### WRAPPER ###############################
class WrapperSelector(PythonSelector):
"""Selector implementation for wrapper selectors using scikit-learn.
Currently implements recursive feature elimination (RFE) and sequential forward selection (SFS) strategies,
which can be combined with nearly any classifier offered by scikit-learn, e.g. SVM.
:param selector: scikit-learn selector strategy (currently RFE and SFS)
:param classifier: scikit-learn classifier to use for wrapper selection.
"""
def __init__(self, name):
super().__init__(name)
self.classifier = self.createClassifier()
self.selector = self.createSelector()
def createClassifier(self):
"""Creates a classifier instance (from scikit-learn) to be used during the selection process.
To enable the framework to use a new classifier, extend this method accordingly.
:returns: scikit-learn classifier instance.
"""
classifier = None
classifierType = self.name.split("-")[0]
if "KNN" in classifierType:
#attention: assumes that KNN is followed by a number!
k = int(classifierType.replace("KNN", ""))
classifier = KNeighborsClassifier(n_neighbors=k)
elif classifierType == "SVMl":#SVM with linear kernel
classifier = LinearSVC(max_iter=10000)
#elif classifierType == "SVMp": # SVM with polynomial kernel, but it does not have coef component
# classifier = SVC(kernel="poly")
elif classifierType == "LR":
classifier = LinearRegression()
elif classifierType == "NB":
#use MultinomialNB because we cannot assume feature likelihood to be gaussian by default
classifier = MultinomialNB()
elif classifierType == "ANOVA":
classifier = f_classif
else:
raise BaseException("No suitable classifier found for " + classifierType + ". Choose between KNNx, SVMl (SVM with linear kernel), SVMp (SVM with polynomial kernel), LR, NB, ANOVA.")
return classifier
def createSelector(self):
"""Creates a selector instance that leads the selection process.
Currently, sequential forward selection (SFS) and recursive feature elimination (RFE) are implemented.
Extend this method if you want to add another selection strategy.
:returns: scikit-learn selector instance.
"""
selector = None
k = utils.getConfigValue("Gene Selection - General", "selectKgenes")
selectorType = self.name.split("-")[1]
if selectorType == "RFE":
selector = RFE(self.classifier, int(k))
elif selectorType == "SFS":
selector = SFS(self.classifier,
k_features=int(k),
forward=True,
floating=False,
scoring='accuracy',
verbose = 2,
n_jobs = int(utils.getConfigValue("General", "numCores")) // 2, #use half of the available cores (n_jobs must be an integer)
cv=0)
return selector
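# Minimal sketch of the two wrapper strategies constructed above, assuming a linear
# SVM and k = 10 features (parameter names follow scikit-learn and mlxtend):
#   from sklearn.svm import LinearSVC
#   from sklearn.feature_selection import RFE
#   from mlxtend.feature_selection import SequentialFeatureSelector as SFS
#   clf = LinearSVC(max_iter=10000)
#   rfe = RFE(clf, n_features_to_select=10)        # recursive feature elimination
#   sfs = SFS(clf, k_features=10, forward=True,    # sequential forward selection
#             floating=False, scoring="accuracy", cv=0)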
def prepareOutput(self, outputFile, data, selector):
"""Overwrites the inherited prepareOutput method because we need to access the particular selector's coefficients.
The coefficients are extracted as feature scores and will be written to the rankings file.
:param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset to get the feature names.
:type data: :class:`pandas.DataFrame`
:param selector: selector instance that is used during feature selection.
"""
start = time.time()
ranking = pd.DataFrame()
try:
x = selector.estimator_.coef_
except:
try:
x = selector.estimator.coef_
except:
x = selector.est_.coef_
selected_columnIDs = selector.ranking_ == 1 #boolean mask of the features selected by RFE
selected_features = data.columns[selected_columnIDs]
ranking["attributeName"] = selected_features
ranking["score"] = x[0]
ranking = ranking.sort_values('score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Prepare Output")
def runSelector(self, data, labels):
"""Runs the actual feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
# do gene selection
start = time.time()
#adjust k to not exceed data columns
k = int(utils.getConfigValue("Gene Selection - General", "selectKgenes"))
if k > data.columns.size:
self.selector.n_features_to_select = data.columns.size
self.selector.k_features = data.columns.size
# do data scaling
scaling = StandardScaler().fit(data)
scaled_data = scaling.transform(data)
data = scaled_data
self.selector = self.selector.fit(data, labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Wrapper Selector")
return self.selector
class SVMRFESelector(JavaSelector):
"""Executes SVM-RFE with poly-kernel.
Uses an efficient java implementation from WEKA and is thus just a wrapper class to invoke the corresponding jars.
"""
def __init__(self):
super().__init__("SVMpRFE")
def createParams(self):
"""Sets the parameters the java program requires (input file, output file, selector name).
:return: list of parameters to use for SVM-RFE execution in java.
:rtype: list of str
"""
params = [self.input, self.output, "SVMpRFE"]
return params
############################### EMBEDDED ###############################
class RandomForestSelector(PythonSelector):
"""Selector class that implements RandomForest as provided by scikit-learn.
"""
def __init__(self):
super().__init__("RandomForest")
#override method because there is no scores_ attribute but instead feature_importances_
def prepareOutput(self, outputFile, data, selector):
"""Overwrites the inherited prepareOutput method because we need to access the RandomForest selector's feature importances.
These feature importances are extracted as feature scores and will be written to the rankings file.
:param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset to get the feature names.
:type data: :class:`pandas.DataFrame`
:param selector: RandomForest selector instance that is used during feature selection.
"""
start = time.time()
ranking = pd.DataFrame()
ranking["attributeName"] = data.columns
ranking["score"] = selector.feature_importances_
ranking = ranking.sort_values('score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Prepare Output")
def runSelector(self, data, labels):
"""Runs the actual feature selection using scikit-learn's RandomForest classifier.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: scikit-learn RandomForestClassifier that ran the selection.
"""
# train a random forest; its feature_importances_ will later serve as feature scores
start = time.time()
clf = RandomForestClassifier(random_state = 0)
# Train the classifier
clf.fit(data, labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Random Forest")
return clf
class LassoSelector(PythonSelector):
"""Selector class that implements Lasso feature selection using scikit-learn.
"""
def __init__(self):
super().__init__("Lasso")
# override method because there is no scores_ attribute but instead coef_
def prepareOutput(self, outputFile, data, selector):
"""Overwrites the inherited prepareOutput method because we need to access Lasso's coefficients.
These coefficients are extracted as feature scores and will be written to the rankings file.
:param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset to get the feature names.
:type data: :class:`pandas.DataFrame`
:param selector: Lasso selector instance that is used during feature selection.
"""
start = time.time()
ranking = pd.DataFrame()
ranking["attributeName"] = data.columns
ranking["score"] = selector.coef_
ranking = ranking.sort_values('score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Prepare Output")
def runSelector(self, data, labels):
"""Runs the actual Lasso feature selector using scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: Lasso selector that ran the selection.
"""
# fit Lasso on the full feature set; the fitted coefficients serve as feature scores
start = time.time()
clf = Lasso()
clf.fit(data, labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Lasso")
return clf
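# Minimal sketch (random toy data) of the call made above; the fitted coef_ vector,
# one entry per feature, is what prepareOutput writes as the score column. The L1
# penalty can shrink coefficients of uninformative features exactly to zero.
#   import numpy as np
#   from sklearn.linear_model import Lasso
#   X = np.random.rand(20, 5)
#   y = np.random.randint(0, 2, size=20)
#   clf = Lasso().fit(X, y)
#   clf.coef_  # array of 5 coefficients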
############################### INTEGRATIVE ###############################
class PreFilterSelector(CombiningSelector):
"""Applies a two-level prefiltering strategy for feature selection.
Filters all features that were not retrieved by a knowledge base based on the search terms provided in the config file.
Applies a (traditional) feature selector on the remaining features afterwards.
For traditional univariate filter approaches, the results retrieved by this class and :class:`PostFilterSelector` will be the same.
"""
def __init__(self, knowledgebase, tradApproach):
super().__init__("Prefilter", knowledgebase, tradApproach)
def selectFeatures(self):
"""Carries out feature selection.
First queries the assigned knowledge base to get genes that are associated to the given search terms.
Filter feature set of input data set to contain only features that are in the retrieved gene set.
Apply (traditional) selector on the filtered data set.
:returns: absolute path to rankings file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
intermediateOutput = utils.getConfigValue("General",
"intermediateDir") + self.getName() + "/"
utils.createDirectory(intermediateOutput)
start = time.time()
externalGenes = self.knowledgebase.getRelevantGenes(self.getSearchTerms())
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Getting Relevant Genes")
outputFilename = self.output + self.getName() + ".csv"
#stop if no genes could be found for filtering
if len(externalGenes) == 0:
return outputFilename
#filter input by externalGenes, keep classLabel and sampleID
matrix = pd.read_csv(self.input)
finalCols = matrix.columns.to_list()[0:2]
# filter externalGenes by genes available in the rankings
dataGenes = set(matrix.columns.to_list())
sharedGenes = list(dataGenes & set(externalGenes))
finalCols.extend(sharedGenes)
filteredMatrix = matrix[finalCols]
#write to file and set this as new input
filtered_input = intermediateOutput + self.getName() + ".csv"
filteredMatrix.to_csv(filtered_input, index = False)
self.tradSelector.setParams(filtered_input, intermediateOutput, self.loggingDir)
rankingFile = self.tradSelector.selectFeatures()
self.timeLogs = pd.concat([self.timeLogs, self.tradSelector.getTimeLogs()])
#rename ranking file so that we can recognize it in the output files
os.rename(rankingFile, outputFilename)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished. ########################")
return outputFilename
class PostFilterSelector(CombiningSelector):
"""Applies a two-level postfiltering strategy for feature selection.
Applies (traditional) feature selection to the input data set.
Afterwards, removes all genes for which no information in the corresponding knowledge base was found based on the search terms provided in the config file.
For traditional univariate filter approaches, the results retrieved by this class and :class:`PreFilterSelector` will be the same.
"""
def __init__(self, knowledgebase, tradApproach):
super().__init__("Postfilter", knowledgebase, tradApproach)
def selectFeatures(self):
"""Carries out feature selection.
First executes (traditional) selector.
Then queries the assigned knowledge base to get genes that are associated to the given search terms.
Finally filters feature set to contain only features that are in the retrieved gene set.
:returns: absolute path to rankings file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
intermediateOutput = utils.getConfigValue("General",
"intermediateDir") + self.getName() + "/"
utils.createDirectory(intermediateOutput)
outputFile = self.output + self.getName() + ".csv"
self.tradSelector.setParams(self.input, intermediateOutput, self.loggingDir)
rankingFile = self.tradSelector.selectFeatures()
self.timeLogs = pd.concat([self.timeLogs, self.tradSelector.getTimeLogs()])
ranking = utils.loadRanking(rankingFile)
kb_start = time.time()
#filter ranking by genes from knowledge base
externalGenes = self.knowledgebase.getRelevantGenes(self.getSearchTerms())
kb_end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, kb_start, kb_end, "Getting Relevant Genes")
#filter externalGenes by genes available in the rankings
dataGenes = set(ranking["attributeName"])
sharedGenes = list(dataGenes & set(externalGenes))
filteredRanking = ranking[ranking["attributeName"].isin(sharedGenes)]
self.writeRankingToFile(filteredRanking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() +".csv")
utils.logInfo("######################## " + self.getName() + " finished. ########################")
return outputFile
class ExtensionSelector(CombiningSelector):
"""Selector implementation inspired by SOFOCLES:
"SoFoCles: Feature filtering for microarray classification based on Gene Ontology", Papachristoudis et al., Journal of Biomedical Informatics, 2010
This selector carries out (traditional) feature selection and in parallel retrieves relevant genes from a knowledge base based on the provided search terms.
The ranking is then adapted by interleaving the feature ranking retrieved by the (traditional) selection approach with the externally retrieved genes.
This is kind of related to an extension approach, where a feature ranking that was retrieved by a traditional approach is extended by such external genes.
"""
def __init__(self, knowledgebase, tradApproach):
super().__init__("Extension", knowledgebase, tradApproach)
def selectFeatures(self):
"""Carries out feature selection.
Executes (traditional) selector and separately retrieves genes from the assigned knowledge base based on the search terms specified in the config.
Finally merges the two feature lists alternating to form an "extended" feature ranking.
:returns: absolute path to rankings file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
intermediateOutput = utils.getConfigValue("General",
"intermediateDir") + self.getName() + "/"
utils.createDirectory(intermediateOutput)
outputFile = self.output + self.getName() + ".csv"
self.tradSelector.setParams(self.input, intermediateOutput, self.loggingDir)
rankingFile = self.tradSelector.selectFeatures()
self.timeLogs = pd.concat([self.timeLogs, self.tradSelector.getTimeLogs()])
trad_ranking = utils.loadRanking(rankingFile)
# extend ranking by genes from knowledge base
kb_start = time.time()
ext_ranking = self.knowledgebase.getGeneScores(self.getSearchTerms())
kb_end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, kb_start, kb_end, "Getting External Gene Scores")
#select top k genes from ext_ranking
#topK = int(utils.getConfigValue("Evaluation", "topKmax"))
#rename columns
ext_ranking.columns = ["attributeName", "score_ext"]
#select only genes from dataset from external ranking
ext_ranking_genes = trad_ranking[trad_ranking["attributeName"].isin(ext_ranking["attributeName"])]
#join gene indices with external ranking to get index column
x = ext_ranking_genes.reindex(columns =["attributeName"])
merged_ranking = x.merge(ext_ranking, on= "attributeName")
merged_ranking = merged_ranking.loc[:,["attributeName", "score_ext"]]
#rename score column
merged_ranking.columns = ["attributeName", "score"]
# sort by scores
merged_ranking.sort_values('score', ascending=False, inplace=True)
ext_ranking = merged_ranking
#ext_len = topK
#if len(merged_ranking.index) < topK:
# ext_len = len(merged_ranking.index)
#topK_ext = merged_ranking.head(ext_len)
#reset index of topK_ext (needed for interleaving)
ext_ranking.index = list(range(1,2 * len(ext_ranking.index) + 1 , 2))
trad_ranking.index = list(range(0, 2 * len(trad_ranking.index), 2))
#interleave both dataframes
interleaved_ranking = pd.concat([trad_ranking, ext_ranking]).sort_index()
#remove duplicate entries and keep first occurrences
interleaved_ranking.drop_duplicates(subset=interleaved_ranking.columns[0], keep="first", inplace=True)
#if len(interleaved_ranking) < topK:
# topK = len(interleaved_ranking)
#cut ranking to topK
#final_ranking = interleaved_ranking.head(topK)
#adjust relevance scores of external genes so that sorting stays the same (assign score between scores of
#gene before and after own rank
indices_to_change = range(1, len(interleaved_ranking)-1, 2)
for index in indices_to_change:
pregene_entry = interleaved_ranking.iloc[index - 1,]
pre_score = pregene_entry.iloc[1]
postgene_entry = interleaved_ranking.iloc[index + 1,]
post_score = postgene_entry.iloc[1]
#set new score to something between the scores of the genes before and after in this ranking
#(assign via iloc on the dataframe itself so the change is not lost on a copy)
interleaved_ranking.iloc[index, 1] = random.uniform(pre_score, post_score)
self.writeRankingToFile(interleaved_ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished. ########################")
return outputFile
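# Hedged trace of the interleaving above (hypothetical genes): with a traditional
# ranking [g1, g2, g3] (indices 0, 2, 4) and an external, knowledge-base-sorted
# ranking [g3, g1] (indices 1, 3), concatenating and sorting by index yields
# [g1, g3, g2, g1, g3]; dropping duplicate attribute names (keep="first") leaves
# the extended ranking [g1, g3, g2], and g3 then receives a score drawn between
# the scores of g1 and g2.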
class NetworkActivitySelector(NetworkSelector):
"""Selector implementation that selects a set of pathways from the knowledge base and maps the feature space to the pathways.
Pathway ranking scores are computed based on the average ANOVA p-value of its member genes and the sample classes.
This method is also used by Chuang et al. and Tian et al. (Discovering statistically significant pathways in expression profiling studies)
Pathway feature values are computed with an instance of :class:`FeatureMapper` or inheriting classes, whose mapping strategies can vary.
If pathways should be selected according to another strategy, use this class as an example implementation to implement a new class that inherits from :class:`NetworkSelector`.
"""
def __init__(self, knowledgebase, featuremapper):
if isinstance(featuremapper, CORGSActivityMapper):
name = "CorgsNetworkActivity"
elif isinstance(featuremapper, PathwayActivityMapper):
name = "NetworkActivity"
else:
name = "NetworkActivity"
super().__init__(name, knowledgebase, featuremapper)
def selectPathways(self, pathways):
"""Computes a pathway ranking for the input pathways.
Computes a pathway score based on the average ANOVA f-test p-value of a pathway's member genes and the sample classes.
:param pathways: dict of pathways (pathway names as keys) to select from.
:type pathways: dict
:returns: pathway ranking with pathway scores
:rtype: :class:`pandas.DataFrame`
"""
#this selector selects the most significant pathways according to the average t-test of its member genes and the sample classes
#method used by Chuang et al. and Tian et al. (Discovering statistically significant pathways in expression profiling studies)
#as we can have > 2 classes, we just use ANOVA instead of t-test
dataset = self.getUnlabeledData()
# define input params
# load data
labels = self.getLabels()
le = preprocessing.LabelEncoder()
numeric_labels = le.fit_transform(labels)
#run ANOVA (if we have just 2 classes, ANOVA is equivalent to the t-test)
selector = SelectKBest(f_classif, k="all")
selector.fit_transform(dataset, numeric_labels)
pvals = pd.Series(selector.pvalues_, index = dataset.columns)
#for every pathway, get the average score from its member genes
pathway_scores = {}
for pathwayName in pathways:
genes = pathways[pathwayName].nodes_by_label.keys()
#check if all genes in genes are in the pvals matrix
existingGenes = list(set(pvals.index) & set(genes))
score = pvals.loc[existingGenes].mean()
pathway_scores[pathwayName] = score
# if we do not have any pathways, stop here
if not pathway_scores:
ranking = pd.DataFrame(columns=["attributeName", "score"])
else:
# sort pathways by score in ascending order
sorted_pathways = sorted(pathway_scores.items(), key=lambda x: x[1])
#create output: a file with: feature, score, and file index
feature_ranking_list = list()
for pathway in sorted_pathways:
feature_ranking_list.append([pathway[0], pathway[1]])
ranking = pd.DataFrame(data = np.array(feature_ranking_list), columns = ["attributeName", "score"])
return ranking
############################### FEATURE MAPPERS ###############################
class FeatureMapper():
"""Abstract.
Inherit from this class and implement :meth:`FeatureMapper.mapFeatures` to implement a new mapping strategy.
Maps the feature space of the given input data to a given set of pathways.
Computes a new feature value for every feature and sample based on the implemented strategy.
"""
def __init__(self):
super().__init__()
@abc.abstractmethod
def mapFeatures(self, original_data, pathways):
"""Abstract method.
Implement this method when inheriting from this class.
Carries out the actual feature mapping.
:param original_data: the original data set of which to map the feature space.
:type original_data: :class:`pandas.DataFrame`
:param pathways: dict of pathway names as keys and corresponding pathway :class:`pypath.Network` objects as values
:type pathways: dict
:returns: the transformed data set with new feature values
:rtype: :class:`pandas.DataFrame`
"""
pass
def getUnlabeledData(self, dataset):
"""Removes the labels from the data set.
:param dataset: data set from which to remove the labels.
:type dataset: :class:`pandas.DataFrame`
:returns: data set without labels.
:rtype: :class:`pandas.DataFrame`
"""
return dataset.loc[:, dataset.columns != "classLabel"]
def getLabels(self, dataset):
"""Gets the dataset labels.
:param dataset: data set from which to extract the labels.
:type dataset: :class:`pandas.DataFrame`
:returns: label vector of the data set.
:rtype: :class:`pandas.Series`
"""
return dataset.iloc[:,0]
def getFeatures(self, dataset):
"""Gets the features of a data set.
:param dataset: data set from which to extract the features.
:type dataset: :class:`pandas.DataFrame`
:returns: feature vector of the data set.
:rtype: :class:`pandas.Series`
"""
return dataset.columns[1:]
def getSamples(self, dataset):
"""Gets all samples in a data set.
:param dataset: data set from which to extract the samples.
:type dataset: :class:`pandas.DataFrame`
:returns: list of samples from the data set.
:rtype: list
"""
return dataset.index.tolist()
def getPathwayGenes(self, pathway, genes):
"""Returns the intersection of a given set of genes and the genes contained in a given pathway.
:param pathway: pathway object from which to get the genes.
:type pathway: :class:`pypath.Network`
:param genes: list of gene names.
:type genes: list of str
:returns: list of genes that are contained in both the pathway and the gene list.
:rtype: list of str
"""
pathway_genes = pathway.nodes_by_label
contained_pathway_genes = list(set(pathway_genes.keys()) & set(genes))
return contained_pathway_genes
class CORGSActivityMapper(FeatureMapper):
"""Pathway mapper that implements the strategy described by Lee et al.: "Inferring Pathway Activity toward Precise Disease Classification"
Identifies CORGS genes for every pathway: uses random search to find the minimal set of genes for which the pathway activity score is maximal.
First, every sample receives an activity score, which is the average expression level of the (CORGS) genes / number of genes.
The computed activity scores are then used for f-testing with the class labels, and the p-values are the new pathway feature values.
These steps are executed again and again until the p-values are not decreasing anymore.
"""
def __init__(self):
super().__init__()
def getANOVAscores(self, data, labels):
"""Applies ANOVA f-test to test the association/correlation of a feature (pathway) with a given label.
The feature has activity scores (computed from CORGS genes) for every sample, which are to be tested for the labels.
:param data: the data set which to test for correlation with the labels (typically feature scores of a pathway for samples).
:type data: :class:`pandas.DataFrame`
:param labels: class labels to use for f-test.
:type labels: :class:`pandas.Series`
:returns: series of p-values for every sample.
:rtype: :class:`pandas.Series`
"""
le = preprocessing.LabelEncoder()
numeric_labels = le.fit_transform(labels)
# run ANOVA (if we have just 2 classes, ANOVA is equivalent to the t-test)
selector = SelectKBest(f_classif, k="all")
if (isinstance(data, pd.Series)):
genes = data.name
data = data.values.reshape(-1, 1)
else:
genes = data.columns
selector.fit_transform(data, numeric_labels)
pvals = pd.Series(selector.pvalues_, index=genes)
return pvals
def computeActivityScore(self, sampleExpressionLevels):
"""Computes the activity score of a given set of genes for a specific sample.
The activity score of a sample is the mean expression value of the given genes divided by the overall number of given genes.
:param sampleExpressionLevels: expression levels of a given set of genes for one sample.
:type sampleExpressionLevels: :class:`pandas.Series`
:returns: activity score for the given sample.
:rtype: float
"""
#activityScore for a sample is the average expression level of the genes / number of genes
x = sampleExpressionLevels.mean() / len(sampleExpressionLevels)
return x
def computeActivityVector(self, expressionLevels):
"""Computes the activity score of a given set of genes for a all samples.
:param expressionLevels: input data set of expression levels for a given set of (CORGS) genes.
:type expressionLevels: :class:`pandas.DataFrame`
:returns: activity scores for all samples.
:rtype: :class:`pandas.Series`
"""
activityVector = expressionLevels.apply(self.computeActivityScore, axis = 1)
return activityVector
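# Minimal sketch of the row-wise apply above (toy expression matrix, hypothetical genes):
#   import pandas as pd
#   expr = pd.DataFrame({"TP53": [2.0, 4.0], "EGFR": [1.0, 3.0]}, index=["s1", "s2"])
#   expr.apply(lambda row: row.mean() / len(row), axis=1)
#   # -> s1: (2.0 + 1.0) / 2 / 2 = 0.75, s2: (4.0 + 3.0) / 2 / 2 = 1.75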
def mapFeatures(self, original_data, pathways):
"""Carries out the actual feature mapping.
Follows the strategy described by Lee et al.: "Inferring Pathway Activity toward Precise Disease Classification"
Identifies CORGS genes for every pathway: uses a greedy forward search to find the minimal set of genes for which the pathway activity score is maximal.
First, every sample receives an activity score, which is the average expression level of the (CORGS) genes divided by the number of genes.
The computed activity scores are then used for f-testing against the class labels, and the resulting p-values become the new pathway feature values.
These steps are repeated until the p-values no longer decrease.
:param original_data: the original data set of which to map the feature space.
:type original_data: :class:`pandas.DataFrame`
:param pathways: dict of pathway names as keys and corresponding pathway :class:`pypath.Network` objects as values
:type pathways: dict
:returns: the transformed data set with new feature values
:rtype: :class:`pandas.DataFrame`
"""
# create pathway activity score S(G) for pathway x and sample y:
# find minimal set of genes G so that S(G) is locally maximal
unlabeledData = self.getUnlabeledData(original_data)
genes = self.getFeatures(original_data)
samples = self.getSamples(original_data)
labels = self.getLabels(original_data)
# create a new dataframe that has pathways as features
pathways_scores = {}
pathways_scores["Unnamed: 0"] = samples
classLabels = pd.DataFrame(original_data.loc[:, "classLabel"])
# for every pathway, compute activity score for every sample
for pathwayname in pathways.keys():
# 1. map genes to network
pathway = pathways[pathwayname]
pathwaygenes = self.getPathwayGenes(pathway, genes)
if (len(pathwaygenes) == 0):
# if none of the genes in the pathway are in the data, just set the pathwayscore to 0
utils.logWarning(
"WARNING: No genes of pathway " + pathwayname + " found in dataset for feature mapping. Assign activity score of 0.0 to all samples.")
pathways_scores[pathwayname] = [0.0] * len(samples)
continue
#2. take top scored gene as seed gene and compute S(gene)
k = 0
#set initial scores to make sure we get at least one run
score_k1 = 0
score_k = -1
max_k = len(pathwaygenes)
#repeat while S(k+1) > S(k), k = #corgs genes
# stop if we have already included all genes of pathway
while (score_k1 > score_k) and (k <= max_k):
#former k+1 score is now our k score
score_k = score_k1
corgs_genes = pathwaygenes[:k+1]
corgs_activities = self.computeActivityVector(unlabeledData.loc[:,corgs_genes])
score_k1 = self.getANOVAscores(corgs_activities, labels)[0]
k += 1
# once greedy search has finished, collect activity scores as new pathway feature values
pathways_scores[pathwayname] = corgs_activities
# create final dataframe with pathways as features
pathwaydata = pd.DataFrame.from_dict(data=pathways_scores)
pathwaydata = pathwaydata.set_index("Unnamed: 0")
# add class labels to dataset
mappedData = classLabels.merge(pathwaydata, right_index = True, left_index = True)
# 4. return new features
return mappedData
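# Illustrative usage sketch for CORGSActivityMapper; the inputs below are hypothetical and
# assume the conventions used throughout this module (a "classLabel" column in the expression
# data and a dict of pypath.Network objects keyed by pathway name).
#
#   mapper = CORGSActivityMapper()
#   mapped_data = mapper.mapFeatures(expression_data, pathways)
#   # mapped_data has one column per pathway (plus "classLabel"), indexed by sample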
class PathwayActivityMapper(FeatureMapper):
"""Pathway mapper that implements a strategy that is related to Vert and Kanehisa's strategy: Vert, Jean-Philippe, and <NAME>. "Graph-driven feature extraction from microarray data using diffusion kernels and kernel CCA." NIPS. 2002.
Computes pathway activity scores for every sample and pathway as new feature values.
The feature value is the average of: expression level weighted by gene variance and neighbor correlation score)
"""
def __init__(self):
super().__init__()
def getAverageCorrelation(self, correlations, gene, neighbors):
"""Computes the average correlation from the correlations of a given gene and its neighbors.
:param correlations: correlation matrix of all genes.
:type correlations: :class:`pandas.DataFrame`
:param gene: gene name whose average neighbor correlation to compute.
:type gene: str
:param neighbors: list of gene names that are neighbors of the given gene.
:type neighbors: list of str
:returns: average correlation value.
:rtype: float
"""
containedNeighbors = list(set(neighbors) & set(correlations.columns))
if len(containedNeighbors) == 0:
return 0
else:
entries = correlations.loc[gene, containedNeighbors]
return entries.mean()
def computeGeneVariances(self, data):
"""Computes the variances for every gene across all samples.
:param data: data set with expression values.
:type data: :class:`pandas.DataFrame`
:returns: variance for every gene.
:rtype: :class:`numpy.ndarray`
"""
selector = VarianceThreshold()
selector.fit_transform(data)
return selector.variances_
def mapFeatures(self, original_data, pathways):
"""Executes the actual feature mapping procedure.
A feature value is the average, over all genes in a pathway, of the expression level weighted by gene variance and average neighbor correlation score.
:param original_data: the original data set of which to map the feature space.
:type original_data: :class:`pandas.DataFrame`
:param pathways: dict of pathway names as keys and corresponding pathway :class:`pypath.Network` objects as values
:type pathways: dict
:returns: the transformed data set with new feature values
:rtype: :class:`pandas.DataFrame`
"""
#create pathway activity score for pathway x and sample y: average of all (gene expressions weighted by gene variance and neighbor correlations)
# compute gene variances
unlabeledData = self.getUnlabeledData(original_data)
genes = self.getFeatures(original_data)
samples = self.getSamples(original_data)
variances = self.computeGeneVariances(unlabeledData)
vars = pd.Series(variances, genes)
# compute correlation scores for genes
correlations = unlabeledData.corr(method="pearson")
# create a new dataframe that has pathways as features
pathways_scores = {}
pathways_scores["Unnamed: 0"] = samples
classLabels = pd.DataFrame(original_data.loc[:,"classLabel"])
# for every pathway, compute activity score for every sample
# set pathways to the correct index as provided in the ranking
for pathwayname in pathways.keys():
# 2. map genes to network
pathway = pathways[pathwayname]
pathwaygenes = self.getPathwayGenes(pathway, genes)
#3. precompute variance and average correlations for every gene in the pathway
scoreparts = {}
for gene in pathwaygenes:
variance = vars.loc[gene]
geneNeighbors = pathway.partners(gene)
neighborNames = [neighbor.label for neighbor in geneNeighbors]
average_correlations = self.getAverageCorrelation(correlations, gene, neighborNames)
scoreparts[gene] = variance * average_correlations
# 4. compute pathway activity score for every sample
prescores = pd.Series(scoreparts)
pathwayscores = []
if (len(pathwaygenes) == 0):
# if none of the genes in the pathway are in the data, just set the pathwayscore to 0
utils.logWarning("WARNING: No genes of pathway " + pathwayname + " found in dataset. Assign activity score of 0.0 to all samples.")
pathwayscores = [0.0] * len(samples)
else:
for sample in samples:
expression_values = original_data.loc[sample, pathwaygenes]
score = expression_values * prescores.loc[pathwaygenes]
# activity score for pathway x and sample y: average of all (gene expressions weighted by gene variance and neighbor correlations)
activityScore = score.mean()
pathwayscores.append(activityScore)
pathways_scores[pathwayname] = pathwayscores
# create final dataframe with pathways as features
pathwaydata = pd.DataFrame.from_dict(data=pathways_scores)
pathwaydata = pathwaydata.set_index("Unnamed: 0")
# add class labels to dataset
mappedData = classLabels.merge(pathwaydata, right_index = True, left_index = True)
# return new features
return mappedData
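# Illustrative usage sketch for PathwayActivityMapper, analogous to the CORGS mapper above;
# the inputs are hypothetical.
#
#   mapper = PathwayActivityMapper()
#   mapped_data = mapper.mapFeatures(expression_data, pathways)
#   # each pathway feature is the variance- and correlation-weighted average expression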
from __future__ import division #so 1/2 = 0.5, not 1/2=0; must appear before any other import
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
from matplotlib.lines import Line2D
import scipy
import random
import os
import glob
import sklearn
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
def extract_ansl_data(file, outpath):
""" function to extract data from ansl output csv
takes path to directory as input"""
# necessary lists for data extraction
conditions = []
items = []
volumes = []
frequencies = []
orders = []
settings = []
subjects = []
for filename in glob.iglob(file): # all specified files in directory
df = pd.read_csv(filename)
for i in df.columns:
for item in df[i]:
# get subject info
subject = filename.split('/')
subject = subject[6]
subject = subject[:5]
subjects.append(subject)
item= item.split('/')
item= item[2]
items.append(item)
# get info on setting
setting = filename.split('/')
if 'baseline' in filename:
settings.append('baseline')
elif 'epi_fast(TR1s)' in filename:
settings.append('epi_fast(TR1s)')
elif 'epi_standard(TR2s)' in filename:
settings.append('epi_standard(TR2s)')
else:
settings.append('structural(T1w)')
# get info on stimuli order
if 'order1' in i:
orders.append('1')
elif 'order2' in i:
orders.append('2')
# get info on volume condition
if 'decrease' in i:
conditions.append('decrease')
elif 'increase' in i:
conditions.append('increase')
# extract discovered volume
if '10dBFS' in item:
volumes.append(-10)
elif '20dBFS' in item:
volumes.append(-20)
elif '30dBFS' in item:
volumes.append(-30)
elif '40dBFS' in item:
volumes.append(-40)
elif '50dBFS' in item:
volumes.append(-50)
elif '60dBFS' in item:
volumes.append(-60)
elif '70dBFS' in item:
volumes.append(-70)
elif '80dBFS' in item:
volumes.append(-80)
elif '90dBFS' in item:
volumes.append(-90)
elif '100dBFS' in item:
volumes.append(-100)
elif 'not_discovered' in item:
volumes.append('0')
if '150hz' in item:
frequencies.append(150)
elif '250hz' in item:
frequencies.append(250)
elif '500hz' in item:
frequencies.append(500)
elif '1000hz' in item:
frequencies.append(1000)
elif '1500hz' in item:
frequencies.append(1500)
elif '2000hz' in item:
frequencies.append(2000)
elif '2250hz' in item:
frequencies.append(2250)
elif '2500hz' in item:
frequencies.append(2500)
elif '2750hz' in item:
frequencies.append(2750)
elif '3000hz' in item:
frequencies.append(3000)
elif '4000hz' in item:
frequencies.append(4000)
elif '6000hz' in item:
frequencies.append(6000)
elif '8000hz' in item:
frequencies.append(8000)
# write to dataframe
df= pd.DataFrame({'subject': subjects,
'setting': settings,
'condition': conditions,
'order': orders,
'item' : items,
'Level (dBFS)' : volumes,
'Frequency (Hz)': frequencies})
df.to_csv(os.path.join(outpath, 'ansl_output.csv'), index=False)
return df
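# Example call (sketch): the paths are hypothetical; the glob pattern should match per-subject
# ANSL output csv files whose paths encode subject, setting and condition information.
#
#   df = extract_ansl_data('/data/*/ansl/*.csv', '/data/derived')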
def convert_ansl_data_dbfs(file, outpath):
df = pd.read_csv(file)
#!/usr/bin/env python3
import argparse
import collections
import copy
import datetime
import functools
import glob
import json
import logging
import math
import operator
import os
import os.path
import re
import sys
import typing
import warnings
import matplotlib
import matplotlib.cm
import matplotlib.dates
import matplotlib.pyplot
import matplotlib.ticker
import networkx
import numpy
import pandas
import tabulate
import tqdm
import rows.console
import rows.load
import rows.location_finder
import rows.model.area
import rows.model.carer
import rows.model.datetime
import rows.model.historical_visit
import rows.model.history
import rows.model.json
import rows.model.location
import rows.model.metadata
import rows.model.past_visit
import rows.model.problem
import rows.model.rest
import rows.model.schedule
import rows.model.service_user
import rows.model.visit
import rows.parser
import rows.plot
import rows.routing_server
import rows.settings
import rows.sql_data_source
def handle_exception(exc_type, exc_value, exc_traceback):
"""Logs uncaught exceptions"""
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
else:
logging.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
__COMMAND = 'command'
__PULL_COMMAND = 'pull'
__INFO_COMMAND = 'info'
__SHOW_WORKING_HOURS_COMMAND = 'show-working-hours'
__COMPARE_BOX_PLOTS_COMMAND = 'compare-box-plots'
__COMPARE_DISTANCE_COMMAND = 'compare-distance'
__COMPARE_WORKLOAD_COMMAND = 'compare-workload'
__COMPARE_QUALITY_COMMAND = 'compare-quality'
__COMPARE_COST_COMMAND = 'compare-cost'
__CONTRAST_WORKLOAD_COMMAND = 'contrast-workload'
__COMPARE_PREDICTION_ERROR_COMMAND = 'compare-prediction-error'
__COMPARE_BENCHMARK_COMMAND = 'compare-benchmark'
__COMPARE_BENCHMARK_TABLE_COMMAND = 'compare-benchmark-table'
__COMPARE_LITERATURE_TABLE_COMMAND = 'compare-literature-table'
__COMPARE_THIRD_STAGE_PLOT_COMMAND = 'compare-third-stage-plot'
__COMPARE_THIRD_STAGE_TABLE_COMMAND = 'compare-third-stage-table'
__COMPARE_THIRD_STAGE_SUMMARY_COMMAND = 'compare-third-stage-summary'
__COMPARE_QUALITY_OPTIMIZER_COMMAND = 'compare-quality-optimizer'
__COMPUTE_RISKINESS_COMMAND = 'compute-riskiness'
__COMPARE_DELAY_COMMAND = 'compare-delay'
__TYPE_ARG = 'type'
__ACTIVITY_TYPE = 'activity'
__VISITS_TYPE = 'visits'
__COMPARE_TRACE_COMMAND = 'compare-trace'
__CONTRAST_TRACE_COMMAND = 'contrast-trace'
__COST_FUNCTION_TYPE = 'cost_function'
__DEBUG_COMMAND = 'debug'
__AREA_ARG = 'area'
__FROM_ARG = 'from'
__TO_ARG = 'to'
__FILE_ARG = 'file'
__DATE_ARG = 'date'
__BASE_FILE_ARG = 'base-file'
__CANDIDATE_FILE_ARG = 'candidate-file'
__SOLUTION_FILE_ARG = 'solution'
__PROBLEM_FILE_ARG = 'problem'
__OUTPUT_PREFIX_ARG = 'output_prefix'
__OPTIONAL_ARG_PREFIX = '--'
__BASE_SCHEDULE_PATTERN = 'base_schedule_pattern'
__CANDIDATE_SCHEDULE_PATTERN = 'candidate_schedule_pattern'
__SCHEDULE_PATTERNS = 'schedule_patterns'
__LABELS = 'labels'
__OUTPUT = 'output'
__ARROWS = 'arrows'
__FILE_FORMAT_ARG = 'output_format'
__color_map = matplotlib.pyplot.get_cmap('tab20c')
FOREGROUND_COLOR = __color_map.colors[0]
FOREGROUND_COLOR2 = 'black'
def get_or_raise(obj, prop):
value = getattr(obj, prop)
if not value:
raise ValueError('{0} not set'.format(prop))
return value
def get_date_time(value):
date_time = datetime.datetime.strptime(value, '%Y-%m-%d')
return date_time
def get_date(value):
value_to_use = get_date_time(value)
return value_to_use.date()
def configure_parser():
parser = argparse.ArgumentParser(prog=sys.argv[0],
description='Robust Optimization '
'for Workforce Scheduling command line utility')
subparsers = parser.add_subparsers(dest=__COMMAND)
pull_parser = subparsers.add_parser(__PULL_COMMAND)
pull_parser.add_argument(__AREA_ARG)
pull_parser.add_argument(__OPTIONAL_ARG_PREFIX + __FROM_ARG)
pull_parser.add_argument(__OPTIONAL_ARG_PREFIX + __TO_ARG)
pull_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT_PREFIX_ARG)
info_parser = subparsers.add_parser(__INFO_COMMAND)
info_parser.add_argument(__FILE_ARG)
compare_distance_parser = subparsers.add_parser(__COMPARE_DISTANCE_COMMAND)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __PROBLEM_FILE_ARG, required=True)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __SCHEDULE_PATTERNS, nargs='+', required=True)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __LABELS, nargs='+', required=True)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __FILE_FORMAT_ARG, default=rows.plot.FILE_FORMAT)
compare_workload_parser = subparsers.add_parser(__COMPARE_WORKLOAD_COMMAND)
compare_workload_parser.add_argument(__PROBLEM_FILE_ARG)
compare_workload_parser.add_argument(__BASE_SCHEDULE_PATTERN)
compare_workload_parser.add_argument(__CANDIDATE_SCHEDULE_PATTERN)
compare_workload_parser.add_argument(__OPTIONAL_ARG_PREFIX + __FILE_FORMAT_ARG, default=rows.plot.FILE_FORMAT)
debug_parser = subparsers.add_parser(__DEBUG_COMMAND)
# debug_parser.add_argument(__PROBLEM_FILE_ARG)
# debug_parser.add_argument(__SOLUTION_FILE_ARG)
compare_trace_parser = subparsers.add_parser(__COMPARE_TRACE_COMMAND)
compare_trace_parser.add_argument(__PROBLEM_FILE_ARG)
compare_trace_parser.add_argument(__FILE_ARG)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __COST_FUNCTION_TYPE, required=True)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __DATE_ARG, type=get_date)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __ARROWS, type=bool, default=False)
contrast_workload_parser = subparsers.add_parser(__CONTRAST_WORKLOAD_COMMAND)
contrast_workload_parser.add_argument(__PROBLEM_FILE_ARG)
contrast_workload_parser.add_argument(__BASE_FILE_ARG)
contrast_workload_parser.add_argument(__CANDIDATE_FILE_ARG)
contrast_workload_parser.add_argument(__OPTIONAL_ARG_PREFIX + __TYPE_ARG)
compare_prediction_error_parser = subparsers.add_parser(__COMPARE_PREDICTION_ERROR_COMMAND)
compare_prediction_error_parser.add_argument(__BASE_FILE_ARG)
compare_prediction_error_parser.add_argument(__CANDIDATE_FILE_ARG)
contrast_trace_parser = subparsers.add_parser(__CONTRAST_TRACE_COMMAND)
contrast_trace_parser.add_argument(__PROBLEM_FILE_ARG)
contrast_trace_parser.add_argument(__BASE_FILE_ARG)
contrast_trace_parser.add_argument(__CANDIDATE_FILE_ARG)
contrast_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __DATE_ARG, type=get_date, required=True)
contrast_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __COST_FUNCTION_TYPE, required=True)
contrast_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
show_working_hours_parser = subparsers.add_parser(__SHOW_WORKING_HOURS_COMMAND)
show_working_hours_parser.add_argument(__FILE_ARG)
show_working_hours_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
compare_quality_parser = subparsers.add_parser(__COMPARE_QUALITY_COMMAND)
compare_quality_optimizer_parser = subparsers.add_parser(__COMPARE_QUALITY_OPTIMIZER_COMMAND)
compare_quality_optimizer_parser.add_argument(__FILE_ARG)
subparsers.add_parser(__COMPARE_COST_COMMAND)
compare_benchmark_parser = subparsers.add_parser(__COMPARE_BENCHMARK_COMMAND)
compare_benchmark_parser.add_argument(__FILE_ARG)
subparsers.add_parser(__COMPARE_LITERATURE_TABLE_COMMAND)
subparsers.add_parser(__COMPARE_BENCHMARK_TABLE_COMMAND)
subparsers.add_parser(__COMPUTE_RISKINESS_COMMAND)
subparsers.add_parser(__COMPARE_DELAY_COMMAND)
subparsers.add_parser(__COMPARE_THIRD_STAGE_TABLE_COMMAND)
subparsers.add_parser(__COMPARE_THIRD_STAGE_PLOT_COMMAND)
compare_box_parser = subparsers.add_parser(__COMPARE_BOX_PLOTS_COMMAND)
compare_box_parser.add_argument(__PROBLEM_FILE_ARG)
compare_box_parser.add_argument(__BASE_FILE_ARG)
compare_box_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
third_stage_summary_parser = subparsers.add_parser(__COMPARE_THIRD_STAGE_SUMMARY_COMMAND)
third_stage_summary_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
return parser
def split_delta(delta: datetime.timedelta) -> typing.Tuple[int, int, int, int]:
days = int(delta.days)
hours = int((delta.total_seconds() - 24 * 3600 * days) // 3600)
minutes = int((delta.total_seconds() - 24 * 3600 * days - 3600 * hours) // 60)
seconds = int(delta.total_seconds() - 24 * 3600 * days - 3600 * hours - 60 * minutes)
assert hours < 24
assert minutes < 60
assert seconds < 60
return days, hours, minutes, seconds
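# Worked example: split_delta(datetime.timedelta(days=1, hours=2, minutes=3, seconds=4))
# returns (1, 2, 3, 4), which get_time_delta_label below renders as '1 day 02:03:04'.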
def get_time_delta_label(total_travel_time: datetime.timedelta) -> str:
days, hours, minutes, seconds = split_delta(total_travel_time)
time = '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
if days == 0:
return time
elif days == 1:
return '1 day ' + time
else:
return '{0} days '.format(days) + time
def pull(args, settings):
area_code = get_or_raise(args, __AREA_ARG)
from_raw_date = get_or_raise(args, __FROM_ARG)
to_raw_date = get_or_raise(args, __TO_ARG)
output_prefix = get_or_raise(args, __OUTPUT_PREFIX_ARG)
console = rows.console.Console()
user_tag_finder = rows.location_finder.UserLocationFinder(settings)
location_cache = rows.location_finder.FileSystemCache(settings)
location_finder = rows.location_finder.MultiModeLocationFinder(location_cache, user_tag_finder, timeout=5.0)
data_source = rows.sql_data_source.SqlDataSource(settings, console, location_finder)
from_date_time = get_date_time(from_raw_date)
to_date_time = get_date_time(to_raw_date)
current_date_time = from_date_time
while current_date_time <= to_date_time:
schedule = data_source.get_past_schedule(rows.model.area.Area(code=area_code), current_date_time.date())
for visit in schedule.visits:
visit.visit.address = None
output_file = '{0}_{1}.json'.format(output_prefix, current_date_time.date().strftime('%Y%m%d'))
with open(output_file, 'w') as output_stream:
json.dump(schedule, output_stream, cls=rows.model.json.JSONEncoder)
current_date_time += datetime.timedelta(days=1)
def get_travel_time(schedule, user_tag_finder):
routes = schedule.routes()
total_travel_time = datetime.timedelta()
with rows.plot.create_routing_session() as session:
for route in routes:
visit_it = iter(route.visits)
current_visit = next(visit_it, None)
current_location = user_tag_finder.find(int(current_visit.visit.service_user))
while current_visit:
prev_location = current_location
current_visit = next(visit_it, None)
if not current_visit:
break
current_location = user_tag_finder.find(int(current_visit.visit.service_user))
travel_time_sec = session.distance(prev_location, current_location)
if travel_time_sec:
total_travel_time += datetime.timedelta(seconds=travel_time_sec)
return total_travel_time
def info(args, settings):
user_tag_finder = rows.location_finder.UserLocationFinder(settings)
user_tag_finder.reload()
schedule_file = get_or_raise(args, __FILE_ARG)
schedule_file_to_use = os.path.realpath(os.path.expandvars(schedule_file))
schedule = rows.load.load_schedule(schedule_file_to_use)
carers = {visit.carer for visit in schedule.visits}
print(get_travel_time(schedule, user_tag_finder), len(carers), len(schedule.visits))
def compare_distance(args, settings):
schedule_patterns = getattr(args, __SCHEDULE_PATTERNS)
labels = getattr(args, __LABELS)
output_file = getattr(args, __OUTPUT, 'distance')
output_file_format = getattr(args, __FILE_FORMAT_ARG)
data_frame_file = 'data_frame_cache.bin'
if os.path.isfile(data_frame_file):
data_frame = pandas.read_pickle(data_frame_file)
else:
problem = rows.load.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
store = []
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
for label, schedule_pattern in zip(labels, schedule_patterns):
for schedule_path in glob.glob(schedule_pattern):
schedule = rows.load.load_schedule(schedule_path)
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(schedule)
frame = rows.plot.get_schedule_data_frame(schedule, problem, duration_estimator, distance_estimator)
visits = frame['Visits'].sum()
carers = len(frame.where(frame['Visits'] > 0))
idle_time = frame['Availability'] - frame['Travel'] - frame['Service']
idle_time[idle_time < pandas.Timedelta(0)] = pandas.Timedelta(0)
overtime = frame['Travel'] + frame['Service'] - frame['Availability']
overtime[overtime < pandas.Timedelta(0)] = pandas.Timedelta(0)
store.append({'Label': label,
'Date': schedule.metadata.begin,
'Availability': frame['Availability'].sum(),
'Travel': frame['Travel'].sum(),
'Service': frame['Service'].sum(),
'Idle': idle_time.sum(),
'Overtime': overtime.sum(),
'Carers': carers,
'Visits': visits})
data_frame = pandas.DataFrame(store)
data_frame.sort_values(by=['Date'], inplace=True)
data_frame.to_pickle(data_frame_file)
condensed_frame = pandas.pivot(data_frame, columns='Label', values='Travel', index='Date')
condensed_frame['Improvement'] = condensed_frame['2nd Stage'] - condensed_frame['3rd Stage']
condensed_frame['RelativeImprovement'] = condensed_frame['Improvement'] / condensed_frame['2nd Stage']
color_map = matplotlib.cm.get_cmap('Set1')
matplotlib.pyplot.set_cmap(color_map)
figure, ax = matplotlib.pyplot.subplots(1, 1, sharex=True)
try:
width = 0.20
dates = data_frame['Date'].unique()
time_delta_convert = rows.plot.TimeDeltaConverter()
indices = numpy.arange(1, len(dates) + 1, 1)
handles = []
position = 0
for color_number, label in enumerate(labels):
data_frame_to_use = data_frame[data_frame['Label'] == label]
handle = ax.bar(indices + position * width,
time_delta_convert(data_frame_to_use['Travel']),
width,
color=color_map.colors[color_number],
bottom=time_delta_convert.zero)
handles.append(handle)
position += 1
ax.yaxis_date()
yaxis_converter = rows.plot.CumulativeHourMinuteConverter()
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(yaxis_converter))
ax.set_ylabel('Total Travel Time [hh:mm:ss]')
ax.set_yticks([time_delta_convert.zero + datetime.timedelta(seconds=seconds) for seconds in range(0, 30 * 3600, 4 * 3600 + 1)])
ax.set_xlabel('Day of October 2017')
translate_labels = {
'3rd Stage': '3rd Stage',
'Human Planners': 'Human Planners'
}
labels_to_use = [translate_labels[label] if label in translate_labels else label for label in labels]
rows.plot.add_legend(ax, handles, labels_to_use, ncol=3, loc='lower center', bbox_to_anchor=(0.5, -0.25)) # , bbox_to_anchor=(0.5, -1.1)
figure.tight_layout()
figure.subplots_adjust(bottom=0.20)
rows.plot.save_figure(output_file, output_file_format)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
# figure, (ax1, ax2, ax3) = matplotlib.pyplot.subplots(3, 1, sharex=True)
# try:
# width = 0.20
# dates = data_frame['Date'].unique()
# time_delta_convert = rows.plot.TimeDeltaConverter()
# indices = numpy.arange(1, len(dates) + 1, 1)
#
# handles = []
# position = 0
# for label in labels:
# data_frame_to_use = data_frame[data_frame['Label'] == label]
#
# handle = ax1.bar(indices + position * width,
# time_delta_convert(data_frame_to_use['Travel']),
# width,
# bottom=time_delta_convert.zero)
#
# ax2.bar(indices + position * width,
# time_delta_convert(data_frame_to_use['Idle']),
# width,
# bottom=time_delta_convert.zero)
#
# ax3.bar(indices + position * width,
# time_delta_convert(data_frame_to_use['Overtime']),
# width,
# bottom=time_delta_convert.zero)
#
# handles.append(handle)
# position += 1
#
# ax1.yaxis_date()
# ax1.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(rows.plot.CumulativeHourMinuteConverter()))
# ax1.set_ylabel('Travel Time')
#
# ax2.yaxis_date()
# ax2.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(rows.plot.CumulativeHourMinuteConverter()))
# ax2.set_ylabel('Idle Time')
#
# ax3.yaxis_date()
# ax3.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(rows.plot.CumulativeHourMinuteConverter()))
# ax3.set_ylabel('Total Overtime')
# ax3.set_xlabel('Day of October 2017')
#
# translate_labels = {
# '3rd Stage': 'Optimizer',
# 'Human Planners': 'Human Planners'
# }
# labels_to_use = [translate_labels[label] if label in translate_labels else label for label in labels]
#
# rows.plot.add_legend(ax3, handles, labels_to_use, ncol=3, loc='lower center', bbox_to_anchor=(0.5, -1.1))
# figure.tight_layout()
# figure.subplots_adjust(bottom=0.20)
#
# rows.plot.save_figure(output_file, output_file_format)
# finally:
# matplotlib.pyplot.cla()
# matplotlib.pyplot.close(figure)
def calculate_forecast_visit_duration(problem):
forecast_visit_duration = rows.plot.VisitDict()
for recurring_visits in problem.visits:
for local_visit in recurring_visits.visits:
forecast_visit_duration[local_visit] = local_visit.duration
return forecast_visit_duration
def compare_workload(args, settings):
problem = rows.load.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
diary_by_date_by_carer = collections.defaultdict(dict)
for carer_shift in problem.carers:
for diary in carer_shift.diaries:
diary_by_date_by_carer[diary.date][carer_shift.carer.sap_number] = diary
base_schedules = {rows.load.load_schedule(file_path): file_path
for file_path in glob.glob(getattr(args, __BASE_SCHEDULE_PATTERN))}
base_schedule_by_date = {schedule.metadata.begin: schedule for schedule in base_schedules}
candidate_schedules = {rows.load.load_schedule(file_path): file_path
for file_path in glob.glob(getattr(args, __CANDIDATE_SCHEDULE_PATTERN))}
candidate_schedule_by_date = {schedule.metadata.begin: schedule for schedule in candidate_schedules}
location_finder = rows.location_finder.UserLocationFinder(settings)
location_finder.reload()
output_file_format = getattr(args, __FILE_FORMAT_ARG)
dates = set(candidate_schedule_by_date.keys())
for date in base_schedule_by_date.keys():
dates.add(date)
dates = list(dates)
dates.sort()
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
for date in dates:
base_schedule = base_schedule_by_date.get(date, None)
if not base_schedule:
logging.error('No base schedule is available for %s', date)
continue
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(base_schedule)
candidate_schedule = candidate_schedule_by_date.get(date, None)
if not candidate_schedule:
logging.error('No candidate schedule is available for %s', date)
continue
base_schedule_file = base_schedules[base_schedule]
base_schedule_data_frame = rows.plot.get_schedule_data_frame(base_schedule, problem, duration_estimator, distance_estimator)
base_schedule_stem, base_schedule_ext = os.path.splitext(os.path.basename(base_schedule_file))
rows.plot.save_workforce_histogram(base_schedule_data_frame, base_schedule_stem, output_file_format)
candidate_schedule_file = candidate_schedules[candidate_schedule]
candidate_schedule_data_frame = rows.plot.get_schedule_data_frame(candidate_schedule, problem, duration_estimator, distance_estimator)
candidate_schedule_stem, candidate_schedule_ext \
= os.path.splitext(os.path.basename(candidate_schedule_file))
rows.plot.save_workforce_histogram(candidate_schedule_data_frame,
candidate_schedule_stem,
output_file_format)
rows.plot.save_combined_histogram(candidate_schedule_data_frame,
base_schedule_data_frame,
['2nd Stage', '3rd Stage'],
'contrast_workforce_{0}_combined'.format(date),
output_file_format)
def contrast_workload(args, settings):
__WIDTH = 0.35
__FORMAT = 'svg'
plot_type = getattr(args, __TYPE_ARG, None)
if plot_type != __ACTIVITY_TYPE and plot_type != __VISITS_TYPE:
raise ValueError(
'Unknown plot type: {0}. Use either {1} or {2}.'.format(plot_type, __ACTIVITY_TYPE, __VISITS_TYPE))
problem_file = get_or_raise(args, __PROBLEM_FILE_ARG)
problem = rows.load.load_problem(problem_file)
base_schedule = rows.load.load_schedule(get_or_raise(args, __BASE_FILE_ARG))
candidate_schedule = rows.load.load_schedule(get_or_raise(args, __CANDIDATE_FILE_ARG))
if base_schedule.metadata.begin != candidate_schedule.metadata.begin:
raise ValueError('Schedules begin at a different date: {0} vs {1}'
.format(base_schedule.metadata.begin, candidate_schedule.metadata.begin))
if base_schedule.metadata.end != candidate_schedule.metadata.end:
raise ValueError('Schedules end at a different date: {0} vs {1}'
.format(base_schedule.metadata.end, candidate_schedule.metadata.end))
location_finder = rows.location_finder.UserLocationFinder(settings)
location_finder.reload()
diary_by_date_by_carer = collections.defaultdict(dict)
for carer_shift in problem.carers:
for diary in carer_shift.diaries:
diary_by_date_by_carer[diary.date][carer_shift.carer.sap_number] = diary
date = base_schedule.metadata.begin
problem_file_base = os.path.basename(problem_file)
problem_file_name, problem_file_ext = os.path.splitext(problem_file_base)
with rows.plot.create_routing_session() as routing_session:
observed_duration_by_visit = calculate_expected_visit_duration(candidate_schedule)
base_schedule_frame = rows.plot.get_schedule_data_frame(base_schedule,
routing_session,
location_finder,
diary_by_date_by_carer[date],
observed_duration_by_visit)
candidate_schedule_frame = rows.plot.get_schedule_data_frame(candidate_schedule,
routing_session,
location_finder,
diary_by_date_by_carer[date],
observed_duration_by_visit)
color_map = matplotlib.cm.get_cmap('tab20')
matplotlib.pyplot.set_cmap(color_map)
figure, axis = matplotlib.pyplot.subplots()
matplotlib.pyplot.tight_layout()
try:
contrast_frame = pandas.DataFrame.merge(base_schedule_frame,
candidate_schedule_frame,
on='Carer',
how='left',
suffixes=['_Base', '_Candidate'])
contrast_frame['Visits_Candidate'] = contrast_frame['Visits_Candidate'].fillna(0)
contrast_frame['Availability_Candidate'] \
= contrast_frame['Availability_Candidate'].mask(pandas.isnull, contrast_frame['Availability_Base'])
contrast_frame['Travel_Candidate'] \
= contrast_frame['Travel_Candidate'].mask(pandas.isnull, datetime.timedelta())
contrast_frame['Service_Candidate'] \
= contrast_frame['Service_Candidate'].mask(pandas.isnull, datetime.timedelta())
contrast_frame = contrast_frame.sort_values(
by=['Availability_Candidate', 'Service_Candidate', 'Travel_Candidate'],
ascending=False)
if plot_type == __VISITS_TYPE:
indices = numpy.arange(len(contrast_frame.index))
base_handle = axis.bar(indices, contrast_frame['Visits_Base'], __WIDTH)
candidate_handle = axis.bar(indices + __WIDTH, contrast_frame['Visits_Candidate'], __WIDTH)
axis.legend((base_handle, candidate_handle),
('Human Planners', 'Constraint Programming'), loc='best')
output_file = problem_file_name + '_contrast_visits_' + date.isoformat() + '.' + __FORMAT
elif plot_type == __ACTIVITY_TYPE:
indices = numpy.arange(len(base_schedule_frame.index))
def plot_activity_stacked_histogram(availability, travel, service, axis, width=0.35, initial_width=0.0,
color_offset=0):
time_delta_converter = rows.plot.TimeDeltaConverter()
travel_series = numpy.array(time_delta_converter(travel))
service_series = numpy.array(time_delta_converter(service))
idle_overtime_series = list(availability - travel - service)
idle_series = numpy.array(time_delta_converter(
map(lambda value: value if value.days >= 0 else datetime.timedelta(), idle_overtime_series)))
overtime_series = numpy.array(time_delta_converter(
map(lambda value: datetime.timedelta(
seconds=abs(value.total_seconds())) if value.days < 0 else datetime.timedelta(),
idle_overtime_series)))
service_handle = axis.bar(indices + initial_width, service_series,
width,
bottom=time_delta_converter.zero,
color=color_map.colors[0 + color_offset])
travel_handle = axis.bar(indices + initial_width,
travel_series,
width,
bottom=service_series + time_delta_converter.zero_num,
color=color_map.colors[2 + color_offset])
idle_handle = axis.bar(indices + initial_width,
idle_series,
width,
bottom=service_series + travel_series + time_delta_converter.zero_num,
color=color_map.colors[4 + color_offset])
overtime_handle = axis.bar(indices + initial_width,
overtime_series,
width,
bottom=idle_series + service_series + travel_series + time_delta_converter.zero_num,
color=color_map.colors[6 + color_offset])
return service_handle, travel_handle, idle_handle, overtime_handle
travel_candidate_handle, service_candidate_handle, idle_candidate_handle, overtime_candidate_handle \
= plot_activity_stacked_histogram(contrast_frame.Availability_Candidate,
contrast_frame.Travel_Candidate,
contrast_frame.Service_Candidate,
axis,
__WIDTH)
travel_base_handle, service_base_handle, idle_base_handle, overtime_base_handle \
= plot_activity_stacked_histogram(contrast_frame.Availability_Base,
contrast_frame.Travel_Base,
contrast_frame.Service_Base,
axis,
__WIDTH,
__WIDTH,
1)
axis.yaxis_date()
axis.yaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M:%S"))
axis.legend(
(travel_candidate_handle, service_candidate_handle, idle_candidate_handle, overtime_candidate_handle,
travel_base_handle, service_base_handle, idle_base_handle, overtime_base_handle),
('', '', '', '', 'Service', 'Travel', 'Idle', 'Overtime'), loc='best', ncol=2, columnspacing=0)
output_file = problem_file_name + '_contrast_activity_' + date.isoformat() + '.' + __FORMAT
bottom, top = axis.get_ylim()
axis.set_ylim(bottom, top + 0.025)
else:
raise ValueError('Unknown plot type {0}'.format(plot_type))
matplotlib.pyplot.subplots_adjust(left=0.125)
matplotlib.pyplot.savefig(output_file, format=__FORMAT, dpi=300)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def parse_time_delta(text):
if text:
time = datetime.datetime.strptime(text, '%H:%M:%S').time()
return datetime.timedelta(hours=time.hour, minutes=time.minute, seconds=time.second)
return None
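# Example: parse_time_delta('01:30:00') returns datetime.timedelta(hours=1, minutes=30),
# while parse_time_delta('') and parse_time_delta(None) return None.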
class TraceLog:
__STAGE_PATTERN = re.compile(r'^\w+(?P<number>\d+)(?:-Patch)?$')
__PENALTY_PATTERN = re.compile(r'^MissedVisitPenalty:\s+(?P<penalty>\d+)$')
__CARER_USED_PATTERN = re.compile(r'^CarerUsedPenalty:\s+(?P<penalty>\d+)$')
class ProgressMessage:
def __init__(self, **kwargs):
self.__branches = kwargs.get('branches', None)
self.__cost = kwargs.get('cost', None)
self.__dropped_visits = kwargs.get('dropped_visits', None)
self.__memory_usage = kwargs.get('memory_usage', None)
self.__solutions = kwargs.get('solutions', None)
self.__wall_time = parse_time_delta(kwargs.get('wall_time', None))
@property
def cost(self):
return self.__cost
@property
def solutions(self):
return self.__solutions
@property
def dropped_visits(self):
return self.__dropped_visits
class ProblemMessage:
def __init__(self, **kwargs):
self.__carers = kwargs.get('carers', None)
self.__visits = kwargs.get('visits', None)
self.__date = kwargs.get('date', None)
if self.__date:
self.__date = datetime.datetime.strptime(self.__date, '%Y-%b-%d').date()
self.__visit_time_windows = parse_time_delta(kwargs.get('visit_time_windows', None))
self.__break_time_windows = parse_time_delta(kwargs.get('break_time_windows', None))
self.__shift_adjustment = parse_time_delta(kwargs.get('shift_adjustment', None))
self.__area = kwargs.get('area', None)
self.__missed_visit_penalty = kwargs.get('missed_visit_penalty', None)
self.__carer_used_penalty = kwargs.get('carer_used_penalty', None)
@property
def date(self):
return self.__date
@property
def carers(self):
return self.__carers
@property
def visits(self):
return self.__visits
@property
def visit_time_window(self):
return self.__visit_time_windows
@property
def carer_used_penalty(self):
return self.__carer_used_penalty
@carer_used_penalty.setter
def carer_used_penalty(self, value):
self.__carer_used_penalty = value
@property
def missed_visit_penalty(self):
return self.__missed_visit_penalty
@missed_visit_penalty.setter
def missed_visit_penalty(self, value):
self.__missed_visit_penalty = value
@property
def shift_adjustment(self):
return self.__shift_adjustment
StageSummary = collections.namedtuple('StageSummary', ['duration', 'final_cost', 'final_dropped_visits'])
def __init__(self, time_point):
self.__start = time_point
self.__events = []
self.__current_stage = None
self.__current_strategy = None
self.__problem = TraceLog.ProblemMessage()
@staticmethod
def __parse_stage_number(body):
comment = body.get('comment', None)
if comment:
match = TraceLog.__STAGE_PATTERN.match(comment)
if match:
return int(match.group('number'))
return None
def append(self, time_point, body):
if 'branches' in body:
body_to_use = TraceLog.ProgressMessage(**body)
elif 'type' in body:
if body['type'] == 'started':
self.__current_stage = self.__parse_stage_number(body)
elif body['type'] == 'finished':
self.__current_stage = None
self.__current_strategy = None
elif body['type'] == 'unknown':
if 'comment' in body:
if 'MissedVisitPenalty' in body['comment']:
match = re.match(self.__PENALTY_PATTERN, body['comment'])
assert match is not None
missed_visit_penalty = int(match.group('penalty'))
self.__problem.missed_visit_penalty = missed_visit_penalty
elif 'CarerUsedPenalty' in body['comment']:
match = re.match(self.__CARER_USED_PATTERN, body['comment'])
assert match is not None
carer_used_penalty = int(match.group('penalty'))
self.__problem.carer_used_penalty = carer_used_penalty
body_to_use = body
elif 'area' in body:
body_to_use = TraceLog.ProblemMessage(**body)
if body_to_use.missed_visit_penalty is None and self.__problem.missed_visit_penalty is not None:
body_to_use.missed_visit_penalty = self.__problem.missed_visit_penalty
if body_to_use.carer_used_penalty is None and self.__problem.carer_used_penalty is not None:
body_to_use.carer_used_penalty = self.__problem.carer_used_penalty
self.__problem = body_to_use
else:
body_to_use = body
# quick fix to prevent negative computation time if the time frame crosses midnight
if self.__start < time_point:
computation_time = time_point - self.__start
else:
computation_time = time_point + datetime.timedelta(hours=24) - self.__start
self.__events.append([computation_time, self.__current_stage, self.__current_strategy, time_point, body_to_use])
def compute_stages(self) -> typing.List[StageSummary]:
groups = dict()
for delta, stage, topic, time, message in self.__events:
if isinstance(message, TraceLog.ProgressMessage):
if stage not in groups:
groups[stage] = []
groups[stage].append([delta, topic, message])
result = []
def create_stage_summary(group):
duration = group[-1][0] - group[0][0]
cost = group[-1][2].cost
dropped_visits = group[-1][2].dropped_visits
return TraceLog.StageSummary(duration=duration, final_cost=cost, final_dropped_visits=dropped_visits)
if len(groups) == 1:
result.append(create_stage_summary(groups[None]))
else:
for stage in range(1, max(filter(lambda s: s is not None, groups)) + 1):
result.append(create_stage_summary(groups[stage]))
return result
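# Sketch of how the stage summaries can be consumed (the trace variable is hypothetical):
#
#   for summary in trace.compute_stages():
#       print(summary.duration, summary.final_cost, summary.final_dropped_visits)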
def has_stages(self):
for relative_time, stage, strategy, absolute_time, event in self.__events:
if isinstance(event, TraceLog.ProblemMessage) or isinstance(event, TraceLog.ProgressMessage):
continue
if 'type' in event and event['type'] == 'started':
return True
return False
def best_cost(self, stage: int):
best_cost, _ = self.__best_cost_and_time(stage)
return best_cost
def best_cost_time(self, stage: int):
_, best_cost_time = self.__best_cost_and_time(stage)
return best_cost_time
def last_cost(self):
last_cost, _ = self.__last_cost_and_time()
return last_cost
def last_cost_time(self):
_, last_cost_time = self.__last_cost_and_time()
return last_cost_time
def computation_time(self):
computation_time = datetime.timedelta.max
for relative_time, stage, strategy, absolute_time, event in self.__events:
computation_time = relative_time
return computation_time
def __best_cost_and_time(self, stage: int):
best_cost = float('inf')
best_time = datetime.timedelta.max
for relative_time, event_stage, strategy, absolute_time, event in self.__filtered_events():
if event_stage > stage:
continue
if best_cost > event.cost:
best_cost = event.cost
best_time = relative_time
return best_cost, best_time
def __last_cost_and_time(self):
last_cost = float('inf')
last_time = datetime.timedelta.max
for relative_time, stage, strategy, absolute_time, event in self.__filtered_events():
last_cost = event.cost
last_time = relative_time
return last_cost, last_time
def __filtered_events(self):
for relative_time, stage, strategy, absolute_time, event in self.__events:
if stage != 2 and stage != 3:
continue
if strategy == 'DELAY_RISKINESS_REDUCTION':
continue
if not isinstance(event, TraceLog.ProgressMessage):
continue
yield relative_time, stage, strategy, absolute_time, event
@property
def strategy(self):
return self.__current_strategy
@strategy.setter
def strategy(self, value):
self.__current_strategy = value
@property
def visits(self):
return self.__problem.visits
@property
def carers(self):
return self.__problem.carers
@property
def date(self):
return self.__problem.date
@property
def visit_time_window(self):
return self.__problem.visit_time_window
@property
def carer_used_penalty(self):
return self.__problem.carer_used_penalty
@property
def missed_visit_penalty(self):
return self.__problem.missed_visit_penalty
@property
def shift_adjustment(self):
return self.__problem.shift_adjustment
@property
def events(self):
return self.__events
def read_traces(trace_file) -> typing.List[TraceLog]:
log_line_pattern = re.compile(r'^\w+\s+(?P<time>\d+:\d+:\d+\.\d+).*?]\s+(?P<body>.*)$')
other_line_pattern = re.compile(r'^.*?\[\w+\s+(?P<time>\d+:\d+:\d+\.\d+).*?\]\s+(?P<body>.*)$')
strategy_line_pattern = re.compile(r'^Solving the (?P<stage_name>\w+) stage using (?P<strategy_name>\w+) strategy$')
loaded_visits_pattern = re.compile(r'^Loaded past visits in \d+ seconds$')
trace_logs = []
has_preambule = False
with open(trace_file, 'r') as input_stream:
current_log = None
for line in input_stream:
match = log_line_pattern.match(line)
if not match:
match = other_line_pattern.match(line)
if match:
raw_time = match.group('time')
time = datetime.datetime.strptime(raw_time, '%H:%M:%S.%f')
try:
raw_body = match.group('body')
body = json.loads(raw_body)
if 'comment' in body and (body['comment'] == 'All'
or 'MissedVisitPenalty' in body['comment']
or 'CarerUsedPenalty' in body['comment']):
if body['comment'] == 'All':
if 'type' in body:
if body['type'] == 'finished':
has_preambule = False
current_log.strategy = None
elif body['type'] == 'started':
has_preambule = True
current_log = TraceLog(time)
current_log.append(time, body)
trace_logs.append(current_log)
else:
current_log.append(time, body)
elif 'area' in body and not has_preambule:
current_log = TraceLog(time)
current_log.append(time, body)
trace_logs.append(current_log)
else:
current_log.append(time, body)
except json.decoder.JSONDecodeError:
strategy_match = strategy_line_pattern.match(match.group('body'))
if strategy_match:
current_log.strategy = strategy_match.group('strategy_name')
continue
loaded_visits_match = loaded_visits_pattern.match(match.group('body'))
if loaded_visits_match:
continue
warnings.warn('Failed to parse line: ' + line)
elif 'GUIDED_LOCAL_SEARCH specified without sane timeout: solve may run forever.' in line:
continue
else:
warnings.warn('Failed to match line: ' + line)
return trace_logs
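# Typical flow (sketch; the log file name is hypothetical):
#
#   traces = read_traces('solver.err.log')
#   frame = traces_to_data_frame(traces)
#   best_cost_per_day = frame.groupby('date')['cost'].min()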
def traces_to_data_frame(trace_logs):
columns = ['relative_time', 'cost', 'dropped_visits', 'solutions', 'stage', 'stage_started', 'date', 'carers',
'visits']
has_stages = [trace.has_stages() for trace in trace_logs]
if all(has_stages) != any(has_stages):
raise ValueError('Some traces have stages while others do not')
has_stages = all(has_stages)
data = []
if has_stages:
for trace in trace_logs:
current_carers = None
current_visits = None
current_stage_started = None
current_stage_name = None
for rel_time, stage, strategy, abs_time, event in trace.events:
if isinstance(event, TraceLog.ProblemMessage):
current_carers = event.carers
current_visits = event.visits
elif isinstance(event, TraceLog.ProgressMessage):
if not current_stage_name:
continue
data.append([rel_time,
event.cost, event.dropped_visits, event.solutions,
current_stage_name, current_stage_started,
trace.date, current_carers, current_visits])
elif 'type' in event:
if 'comment' in event and event['type'] == 'unknown':
continue
if event['type'] == 'finished':
current_carers = None
current_visits = None
current_stage_started = None
current_stage_name = None
continue
if event['type'] == 'started':
current_stage_started = rel_time
current_stage_name = event['comment']
else:
for trace in trace_logs:
current_carers = None
current_visits = None
for rel_time, stage, strategy, abs_time, event in trace.events:
if isinstance(event, TraceLog.ProblemMessage):
current_carers = event.carers
current_visits = event.visits
elif isinstance(event, TraceLog.ProgressMessage):
data.append([rel_time,
event.cost, event.dropped_visits, event.solutions,
None, None,
trace.date, current_carers, current_visits])
return pandas.DataFrame(data=data, columns=columns)
def parse_pandas_duration(value):
raw_hours, raw_minutes, raw_seconds = value.split(':')
return datetime.timedelta(hours=int(raw_hours), minutes=int(raw_minutes), seconds=int(raw_seconds))
class DateTimeFormatter:
def __init__(self, format):
self.__format = format
def __call__(self, x, pos=None):
if x < 0:
return None
x_to_use = x
if isinstance(x, numpy.int64):
x_to_use = x.item()
delta = datetime.timedelta(seconds=x_to_use)
time_point = datetime.datetime(2017, 1, 1) + delta
return time_point.strftime(self.__format)
class AxisSettings:
def __init__(self, minutes_per_step, format_pattern, units_label, right_xlimit, xticks):
self.__minutes_per_step = minutes_per_step
self.__format_pattern = format_pattern
self.__formatter = matplotlib.ticker.FuncFormatter(DateTimeFormatter(self.__format_pattern))
self.__units_label = units_label
self.__right_xlimit = right_xlimit
self.__xticks = xticks
@property
def formatter(self):
return self.__formatter
@property
def units_label(self):
return self.__units_label
@property
def right_xlimit(self):
return self.__right_xlimit
@property
def xticks(self):
return self.__xticks
@staticmethod
def infer(max_relative_time):
if datetime.timedelta(minutes=30) < max_relative_time < datetime.timedelta(hours=1):
minutes_step = 10
format = '%H:%M'
units = '[hh:mm]'
elif datetime.timedelta(hours=1) <= max_relative_time:
minutes_step = 60
format = '%H:%M'
units = '[hh:mm]'
else:
assert max_relative_time <= datetime.timedelta(minutes=30)
minutes_step = 5
format = '%M:%S'
units = '[mm:ss]'
right_xlimit = (max_relative_time + datetime.timedelta(minutes=1)).total_seconds() // 60 * 60
xticks = numpy.arange(0, max_relative_time.total_seconds() + minutes_step * 60, minutes_step * 60)
return AxisSettings(minutes_step, format, units, right_xlimit, xticks)
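# Example (sketch): AxisSettings.infer(datetime.timedelta(minutes=45)) selects 10-minute tick
# steps with '%H:%M' labels, whereas runs longer than an hour fall back to 60-minute steps.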
def format_timedelta_pandas(x, pos=None):
if x < 0:
return None
time_delta = pandas.to_timedelta(x)
hours = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_HOUR)
minutes = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_MIN) - 60 * hours
return '{0:02d}:{1:02d}'.format(hours, minutes)
def format_time(x, pos=None):
if isinstance(x, numpy.int64):
x = x.item()
delta = datetime.timedelta(seconds=x)
time_point = datetime.datetime(2017, 1, 1) + delta
return time_point.strftime('%H:%M')
__SCATTER_POINT_SIZE = 1
__Y_AXIS_EXTENSION = 1.2
def add_trace_legend(axis, handles, bbox_to_anchor=(0.5, -0.23), ncol=3):
first_row = handles[0]
def legend_single_stage(row):
handle, multi_visits, visits, carers, cost_function, date = row
date_time = datetime.datetime.combine(date, datetime.time())
return 'V{0:02}/{1:03} C{2:02} {3} {4}'.format(multi_visits,
visits,
carers,
cost_function,
date_time.strftime('%d-%m'))
def legend_multi_stage(row):
handle, multi_visits, visits, multi_carers, carers, cost_function, date = row
date_time = datetime.datetime.combine(date, datetime.time())
return 'V{0:02}/{1:03} C{2:02}/{3:02} {4} {5}' \
.format(multi_visits, visits, multi_carers, carers, cost_function, date_time.strftime('%d-%m'))
if len(first_row) == 6:
legend_formatter = legend_single_stage
elif len(first_row) == 7:
legend_formatter = legend_multi_stage
else:
raise ValueError('Expecting row of either 6 or 7 elements')
return rows.plot.add_legend(axis,
list(map(operator.itemgetter(0), handles)),
list(map(legend_formatter, handles)),
ncol,
bbox_to_anchor)
def scatter_cost(axis, data_frame, color):
return axis.scatter(
[time_delta.total_seconds() for time_delta in data_frame['relative_time']], data_frame['cost'],
s=__SCATTER_POINT_SIZE,
c=color)
def scatter_dropped_visits(axis, data_frame, color):
axis.scatter(
[time_delta.total_seconds() for time_delta in data_frame['relative_time']],
data_frame['dropped_visits'],
s=__SCATTER_POINT_SIZE,
c=color)
def draw_avline(axis, point, color='lightgrey', linestyle='--'):
axis.axvline(point, color=color, linestyle=linestyle, linewidth=0.8, alpha=0.8)
def get_problem_stats(problem, date):
problem_visits = [visit for carer_visits in problem.visits
for visit in carer_visits.visits if visit.date == date]
return len(problem_visits), len([visit for visit in problem_visits if visit.carer_count > 1])
def compare_trace(args, settings):
problem = rows.load.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
cost_function = get_or_raise(args, __COST_FUNCTION_TYPE)
trace_file = get_or_raise(args, __FILE_ARG)
trace_file_base_name = os.path.basename(trace_file)
trace_file_stem, trace_file_ext = os.path.splitext(trace_file_base_name)
output_file_stem = getattr(args, __OUTPUT, trace_file_stem)
trace_logs = read_traces(trace_file)
data_frame = traces_to_data_frame(trace_logs)
current_date = getattr(args, __DATE_ARG, None)
dates = data_frame['date'].unique()
if current_date and current_date not in dates:
raise ValueError('Date {0} is not present in the data set'.format(current_date))
color_numbers = [0, 2, 4, 6, 8, 10, 12, 1, 3, 5, 7, 9, 11, 13]
color_number_it = iter(color_numbers)
color_map = matplotlib.cm.get_cmap('tab20')
matplotlib.pyplot.set_cmap(color_map)
figure, (ax1, ax2) = matplotlib.pyplot.subplots(2, 1, sharex=True)
max_relative_time = datetime.timedelta()
try:
if current_date:
current_color = color_map.colors[next(color_number_it)]
total_problem_visits, total_multiple_carer_visits = get_problem_stats(problem, current_date)
current_date_frame = data_frame[data_frame['date'] == current_date]
max_relative_time = max(current_date_frame['relative_time'].max(), max_relative_time)
ax_settings = AxisSettings.infer(max_relative_time)
stages = current_date_frame['stage'].unique()
if len(stages) > 1:
handles = []
for stage in stages:
time_delta = current_date_frame[current_date_frame['stage'] == stage]['stage_started'].iloc[0]
current_stage_data_frame = current_date_frame[current_date_frame['stage'] == stage]
draw_avline(ax1, time_delta.total_seconds())
draw_avline(ax2, time_delta.total_seconds())
total_stage_visits = current_stage_data_frame['visits'].iloc[0]
carers = current_stage_data_frame['carers'].iloc[0]
handle = scatter_cost(ax1, current_date_frame, current_color)
scatter_dropped_visits(ax2, current_stage_data_frame, current_color)
handles.append([handle,
total_multiple_carer_visits,
total_stage_visits,
carers,
cost_function,
current_date])
ax2.set_xlim(left=0)
ax2.set_ylim(bottom=-10)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
else:
total_visits = current_date_frame['visits'].iloc[0]
if total_visits != (total_problem_visits + total_multiple_carer_visits):
raise ValueError('Number of visits in problem and solution does not match: {0} vs {1}'
.format(total_visits, (total_problem_visits + total_multiple_carer_visits)))
carers = current_date_frame['carers'].iloc[0]
handle = ax1.scatter(
[time_delta.total_seconds() for time_delta in current_date_frame['relative_time']],
current_date_frame['cost'], s=1)
add_trace_legend(ax1, [[handle, total_multiple_carer_visits, total_problem_visits, carers, cost_function, current_date]])
scatter_dropped_visits(ax2, current_date_frame, current_color)
ax1_y_bottom, ax1_y_top = ax1.get_ylim()
ax1.set_ylim(bottom=0, top=ax1_y_top * __Y_AXIS_EXTENSION)
ax1.set_ylabel('Cost Function [s]')
ax2_y_bottom, ax2_y_top = ax2.get_ylim()
ax2.set_ylim(bottom=-10, top=ax2_y_top * __Y_AXIS_EXTENSION)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
rows.plot.save_figure(output_file_stem + '_' + current_date.isoformat())
else:
handles = []
for current_date in dates:
current_color = color_map.colors[next(color_number_it)]
current_date_frame = data_frame[data_frame['date'] == current_date]
max_relative_time = max(current_date_frame['relative_time'].max(), max_relative_time)
total_problem_visits, total_multiple_carer_visits = get_problem_stats(problem, current_date)
stages = current_date_frame['stage'].unique()
if len(stages) > 1:
stage_linestyles = [None, 'dotted', 'dashed']
for stage, linestyle in zip(stages, stage_linestyles):
time_delta = current_date_frame[current_date_frame['stage'] == stage]['stage_started'].iloc[0]
draw_avline(ax1, time_delta.total_seconds(), color=current_color, linestyle=linestyle)
draw_avline(ax2, time_delta.total_seconds(), color=current_color, linestyle=linestyle)
total_carers = current_date_frame['carers'].max()
multi_carers = current_date_frame['carers'].min()
if multi_carers == total_carers:
multi_carers = 0
total_visits = current_date_frame['visits'].max()
multi_visits = current_date_frame['visits'].min()
if multi_visits == total_visits:
multi_visits = 0
handle = scatter_cost(ax1, current_date_frame, current_color)
scatter_dropped_visits(ax2, current_date_frame, current_color)
handles.append([handle,
multi_visits,
total_visits,
multi_carers,
total_carers,
cost_function,
current_date])
else:
total_visits = current_date_frame['visits'].iloc[0]
if total_visits != (total_problem_visits + total_multiple_carer_visits):
raise ValueError('Number of visits in problem and solution does not match: {0} vs {1}'
.format(total_visits, (total_problem_visits + total_multiple_carer_visits)))
carers = current_date_frame['carers'].iloc[0]
handle = scatter_cost(ax1, current_date_frame, current_color)
handles.append([handle,
total_multiple_carer_visits,
total_problem_visits,
carers,
cost_function,
current_date])
scatter_dropped_visits(ax2, current_date_frame, current_color)
ax_settings = AxisSettings.infer(max_relative_time)
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2, 2))
ax1.xaxis.set_major_formatter(ax_settings.formatter)
# if add_arrows:
# ax1.arrow(950, 200000, 40, -110000, head_width=10, head_length=20000, fc='k', ec='k')
# ax2.arrow(950, 60, 40, -40, head_width=10, head_length=10, fc='k', ec='k')
ax1_y_bottom, ax1_y_top = ax1.get_ylim()
ax1.set_ylim(bottom=0, top=ax1_y_top * __Y_AXIS_EXTENSION)
ax1.set_xlim(left=0, right=ax_settings.right_xlimit)
ax1.set_ylabel('Cost Function [s]')
ax2_y_bottom, ax2_y_top = ax2.get_ylim()
ax2.set_ylim(bottom=-10, top=ax2_y_top * __Y_AXIS_EXTENSION)
ax2.set_xlim(left=0, right=ax_settings.right_xlimit)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
ax2.set_xticks(ax_settings.xticks)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
matplotlib.pyplot.tight_layout()
rows.plot.save_figure(output_file_stem)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def get_schedule_stats(data_frame):
def get_stage_stats(stage):
if stage and (isinstance(stage, str) or (isinstance(stage, float) and not numpy.isnan(stage))):
stage_frame = data_frame[data_frame['stage'] == stage]
else:
stage_frame = data_frame[data_frame['stage'].isnull()]
min_carers, max_carers = stage_frame['carers'].min(), stage_frame['carers'].max()
if min_carers != max_carers:
raise ValueError(
'Number of carers differs within stage, range [{0}, {1}]'.format(min_carers, max_carers))
min_visits, max_visits = stage_frame['visits'].min(), stage_frame['visits'].max()
if min_visits != max_visits:
raise ValueError(
'Number of visits differs within stage, range [{0}, {1}]'.format(min_visits, max_visits))
return min_carers, min_visits
stages = data_frame['stage'].unique()
if len(stages) > 1:
data = []
for stage in stages:
carers, visits = get_stage_stats(stage)
data.append([stage, carers, visits])
return data
else:
stage_to_use = None
if len(stages) == 1:
stage_to_use = stages[0]
carers, visits = get_stage_stats(stage_to_use)
return [[None, carers, visits]]
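# Plots the cost function and declined visits against computation time for a single day,
# contrasting a base trace with a candidate trace; each trace is labelled 'Direct' or 'Multistage'
# depending on the number of stages it contains.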
def contrast_trace(args, settings):
problem_file = get_or_raise(args, __PROBLEM_FILE_ARG)
problem = rows.load.load_problem(problem_file)
problem_file_base = os.path.basename(problem_file)
problem_file_name, problem_file_ext = os.path.splitext(problem_file_base)
output_file_stem = getattr(args, __OUTPUT, problem_file_name + '_contrast_traces')
cost_function = get_or_raise(args, __COST_FUNCTION_TYPE)
base_trace_file = get_or_raise(args, __BASE_FILE_ARG)
candidate_trace_file = get_or_raise(args, __CANDIDATE_FILE_ARG)
base_frame = traces_to_data_frame(read_traces(base_trace_file))
candidate_frame = traces_to_data_frame(read_traces(candidate_trace_file))
current_date = get_or_raise(args, __DATE_ARG)
if current_date not in base_frame['date'].unique():
raise ValueError('Date {0} is not present in the base data set'.format(current_date))
if current_date not in candidate_frame['date'].unique():
raise ValueError('Date {0} is not present in the candidate data set'.format(current_date))
max_relative_time = datetime.timedelta()
max_relative_time = max(base_frame[base_frame['date'] == current_date]['relative_time'].max(), max_relative_time)
max_relative_time = max(candidate_frame[candidate_frame['date'] == current_date]['relative_time'].max(), max_relative_time)
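# the computed horizon is overridden below with a fixed 20-minute plotting window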
max_relative_time = datetime.timedelta(minutes=20)
ax_settings = AxisSettings.infer(max_relative_time)
color_map = matplotlib.cm.get_cmap('Set1')
matplotlib.pyplot.set_cmap(color_map)
figure, (ax1, ax2) = matplotlib.pyplot.subplots(2, 1, sharex=True)
try:
def plot(data_frame, color):
stages = data_frame['stage'].unique()
if len(stages) > 1:
for stage, linestyle in zip(stages, [None, 'dotted', 'dashed']):
time_delta = data_frame[data_frame['stage'] == stage]['stage_started'].iloc[0]
draw_avline(ax1, time_delta.total_seconds(), linestyle=linestyle)
draw_avline(ax2, time_delta.total_seconds(), linestyle=linestyle)
scatter_dropped_visits(ax2, data_frame, color=color)
return scatter_cost(ax1, data_frame, color=color)
base_current_data_frame = base_frame[base_frame['date'] == current_date]
base_handle = plot(base_current_data_frame, color_map.colors[0])
base_stats = get_schedule_stats(base_current_data_frame)
candidate_current_data_frame = candidate_frame[candidate_frame['date'] == current_date]
candidate_handle = plot(candidate_current_data_frame, color_map.colors[1])
candidate_stats = get_schedule_stats(candidate_current_data_frame)
labels = []
for stages in [base_stats, candidate_stats]:
if len(stages) == 1:
labels.append('Direct')
elif len(stages) > 1:
labels.append('Multistage')
else:
raise ValueError('Schedule statistics contain no stages')
ax1.set_ylim(bottom=0.0)
ax1.set_ylabel('Cost Function [s]')
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2, 2))
ax1.xaxis.set_major_formatter(ax_settings.formatter)
ax1.set_xlim(left=0.0, right=max_relative_time.total_seconds())
legend1 = ax1.legend([base_handle, candidate_handle], labels)
for handle in legend1.legendHandles:
handle._sizes = [25]
ax2.set_xlim(left=0.0, right=max_relative_time.total_seconds())
ax2.set_ylim(bottom=0.0)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
ax1.set_xticks(ax_settings.xticks)
ax2.set_xticks(ax_settings.xticks)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
legend2 = ax2.legend([base_handle, candidate_handle], labels)
for handle in legend2.legendHandles:
handle._sizes = [25]
figure.tight_layout()
matplotlib.pyplot.tight_layout()
rows.plot.save_figure(output_file_stem + '_' + current_date.isoformat())
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
figure, (ax1, ax2) = matplotlib.pyplot.subplots(2, 1, sharex=True)
try:
candidate_current_data_frame = candidate_frame[candidate_frame['date'] == current_date]
scatter_dropped_visits(ax2, candidate_current_data_frame, color=color_map.colors[1])
scatter_cost(ax1, candidate_current_data_frame, color=color_map.colors[1])
stage2_started = \
candidate_current_data_frame[candidate_current_data_frame['stage'] == 'Stage2']['stage_started'].iloc[0]
ax1.set_ylim(bottom=0, top=6 * 10 ** 4)
ax1.set_ylabel('Cost Function [s]')
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2, 2))
ax1.xaxis.set_major_formatter(ax_settings.formatter)
ax1.set_xlim(left=0, right=12)
ax2.set_xlim(left=0, right=12)
x_ticks_positions = range(0, 12 + 1, 2)
# matplotlib.pyplot.locator_params(axis='x', nbins=6)
ax2.set_ylim(bottom=-10.0, top=120)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
ax2.set_xticks(x_ticks_positions)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
matplotlib.pyplot.tight_layout()
# rows.plot.save_figure(output_file_stem + '_first_stage_' + current_date.isoformat())
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
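# Draws box plots of stage duration, final cost and declined visits for every stage across all traces in the base log.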
def compare_box_plots(args, settings):
problem_file = get_or_raise(args, __PROBLEM_FILE_ARG)
problem = rows.load.load_problem(problem_file)
problem_file_base = os.path.basename(problem_file)
problem_file_name, problem_file_ext = os.path.splitext(problem_file_base)
base_trace_file = get_or_raise(args, __BASE_FILE_ARG)
output_file_stem = getattr(args, __OUTPUT, problem_file_name)
traces = read_traces(base_trace_file)
figure, (ax1, ax2, ax3) = matplotlib.pyplot.subplots(1, 3)
stages = [trace.compute_stages() for trace in traces]
num_stages = max(len(s) for s in stages)
durations = [[getattr(local_stage[num_stage], 'duration').total_seconds() for local_stage in stages] for num_stage in range(num_stages)]
max_duration = max(max(stage_durations) for stage_durations in durations)
axis_settings = AxisSettings.infer(datetime.timedelta(seconds=max_duration))
try:
ax1.boxplot(durations, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax1.set_yticks(axis_settings.xticks)
ax1.yaxis.set_major_formatter(axis_settings.formatter)
ax1.set_xlabel('Stage')
ax1.set_ylabel('Duration [hh:mm]')
costs = [[getattr(local_stage[num_stage], 'final_cost') for local_stage in stages] for num_stage in range(num_stages)]
ax2.boxplot(costs, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
formatter = matplotlib.ticker.ScalarFormatter()
formatter.set_scientific(True)
formatter.set_powerlimits((-3, 3))
ax2.yaxis.set_major_formatter(formatter)
ax2.set_xlabel('Stage')
ax2.set_ylabel('Cost')
declined_visits = [[getattr(local_stage[num_stage], 'final_dropped_visits') for local_stage in stages] for num_stage in range(num_stages)]
ax3.boxplot(declined_visits, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
max_declined_visits = max(max(stage_declined_visits) for stage_declined_visits in declined_visits)
ax3.set_xlabel('Stage')
ax3.set_ylabel('Declined Visits')
dropped_visit_ticks = None
if max_declined_visits < 100:
dropped_visit_ticks = range(0, max_declined_visits + 1)
else:
dropped_visit_ticks = range(0, max_declined_visits + 100, 100)
ax3.set_yticks(dropped_visit_ticks)
figure.tight_layout()
rows.plot.save_figure(output_file_stem)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
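# Plots the relative error between visit durations observed in the base schedule and the durations
# expected by the candidate schedule.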
def compare_prediction_error(args, settings):
base_schedule = rows.plot.load_schedule(get_or_raise(args, __BASE_FILE_ARG))
candidate_schedule = rows.plot.load_schedule(get_or_raise(args, __CANDIDATE_FILE_ARG))
observed_duration_by_visit = rows.plot.calculate_observed_visit_duration(base_schedule)
expected_duration_by_visit = calculate_expected_visit_duration(candidate_schedule)
data = []
for visit in base_schedule.visits:
observed_duration = observed_duration_by_visit[visit.visit]
expected_duration = expected_duration_by_visit[visit.visit]
data.append([visit.key, observed_duration.total_seconds(), expected_duration.total_seconds()])
frame = pandas.DataFrame(columns=['Visit', 'ObservedDuration', 'ExpectedDuration'], data=data)
frame['Error'] = (frame.ObservedDuration - frame.ExpectedDuration) / frame.ObservedDuration
figure, axis = matplotlib.pyplot.subplots()
try:
axis.plot(frame['Error'], label='(Observed - Expected)/Observed')
axis.legend()
axis.set_ylim(-20, 2)
axis.grid()
matplotlib.pyplot.show()
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
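# Repairs a schedule in two passes: first drop visits whose recorded check-in exceeds the allowed
# time window, then walk each route with estimated durations and travel times, dropping visits that
# would push the carer past the extended end of the shift and trimming the last visit of routes whose
# slack (plus a two-hour tolerance) cannot cover the carer's contractual breaks.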
def remove_violated_visits(rough_schedule: rows.model.schedule.Schedule,
metadata: TraceLog,
problem: rows.model.problem.Problem,
duration_estimator: rows.plot.DurationEstimator,
distance_estimator: rows.plot.DistanceEstimator) -> rows.model.schedule.Schedule:
max_delay = metadata.visit_time_window
min_delay = -metadata.visit_time_window
dropped_visits = 0
allowed_visits = []
for route in rough_schedule.routes:
carer_diary = problem.get_diary(route.carer, metadata.date)
if not carer_diary:
continue
for visit in route.visits:
if visit.check_in is not None:
check_in_delay = visit.check_in - datetime.datetime.combine(metadata.date, visit.time)
if check_in_delay > max_delay: # or check_in_delay < min_delay:
dropped_visits += 1
continue
allowed_visits.append(visit)
# schedule does not have visits which exceed time windows
first_improved_schedule = rows.model.schedule.Schedule(carers=rough_schedule.carers, visits=allowed_visits)
allowed_visits = []
for route in first_improved_schedule.routes:
if not route.visits:
continue
diary = problem.get_diary(route.carer, metadata.date)
assert diary is not None
# shift adjustment is added twice because it is allowed to extend the time before and after the working hours
max_shift_end = max(event.end for event in diary.events) + metadata.shift_adjustment + metadata.shift_adjustment
first_visit = route.visits[0]
current_time = datetime.datetime.combine(metadata.date, first_visit.time)
if current_time <= max_shift_end:
allowed_visits.append(first_visit)
visits_made = []
total_slack = datetime.timedelta()
if len(route.visits) == 1:
visit = route.visits[0]
visit_duration = duration_estimator(visit.visit)
if visit_duration is None:
visit_duration = visit.duration
current_time += visit_duration
if current_time <= max_shift_end:
visits_made.append(visit)
else:
dropped_visits += 1
else:
for prev_visit, next_visit in route.edges():
visit_duration = duration_estimator(prev_visit.visit)
if visit_duration is None:
visit_duration = prev_visit.duration
current_time += visit_duration
current_time += distance_estimator(prev_visit, next_visit)
start_time = max(current_time, datetime.datetime.combine(metadata.date, next_visit.time) - max_delay)
total_slack += start_time - current_time
current_time = start_time
if current_time <= max_shift_end:
visits_made.append(next_visit)
else:
dropped_visits += 1
if current_time <= max_shift_end:
total_slack += max_shift_end - current_time
total_break_duration = datetime.timedelta()
for carer_break in diary.breaks:
total_break_duration += carer_break.duration
if total_slack + datetime.timedelta(hours=2) < total_break_duration:
# route is not respecting contractual breaks
visits_made.pop()
for visit in visits_made:
allowed_visits.append(visit)
# schedule does not contain visits which exceed overtime of the carer
return rows.model.schedule.Schedule(carers=rough_schedule.carers, visits=allowed_visits)
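# Aggregates the cost of a schedule: total travel time, carers used and visits missed,
# combined with the missed-visit penalty and, optionally, a fixed cost per carer.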
class ScheduleCost:
CARER_COST = datetime.timedelta(seconds=60 * 60 * 4)
def __init__(self, travel_time: datetime.timedelta, carers_used: int, visits_missed: int, missed_visit_penalty: int):
self.__travel_time = travel_time
self.__carers_used = carers_used
self.__visits_missed = visits_missed
self.__missed_visit_penalty = missed_visit_penalty
@property
def travel_time(self) -> datetime.timedelta:
return self.__travel_time
@property
def visits_missed(self) -> int:
return self.__visits_missed
@property
def missed_visit_penalty(self) -> int:
return self.__missed_visit_penalty
@property
def carers_used(self) -> int:
return self.__carers_used
def total_cost(self, include_vehicle_cost: bool) -> float:
cost = self.__travel_time.total_seconds() + self.__missed_visit_penalty * self.__visits_missed
if include_vehicle_cost:
cost += self.CARER_COST.total_seconds() * self.__carers_used
return cost
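# Computes the ScheduleCost of a schedule by walking every route and comparing the visits made
# against the visits requested in the problem for that day.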
def get_schedule_cost(schedule: rows.model.schedule.Schedule,
metadata: TraceLog,
problem: rows.model.problem.Problem,
distance_estimator: rows.plot.DistanceEstimator) -> ScheduleCost:
carer_used_ids = set()
visit_made_ids = set()
travel_time = datetime.timedelta()
for route in schedule.routes:
if not route.visits:
continue
carer_used_ids.add(route.carer.sap_number)
for visit in route.visits:
visit_made_ids.add(visit.visit.key)
for source, destination in route.edges():
travel_time += distance_estimator(source, destination)
available_visit_ids = {visit.key for visit in problem.requested_visits(schedule.date)}
return ScheduleCost(travel_time, len(carer_used_ids), len(available_visit_ids.difference(visit_made_ids)), metadata.missed_visit_penalty)
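# Tabulates the cost of the human planners' schedules against the solver's second- and third-stage
# solutions for each simulated day, after removing visits that violate time-window or shift constraints.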
def compare_schedule_cost(args, settings):
ProblemConfig = collections.namedtuple('ProblemConfig',
['ProblemPath', 'HumanSolutionPath', 'SolverSecondSolutionPath', 'SolverThirdSolutionPath'])
simulation_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations'
solver_log_file = os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5.err.log')
problem_data = [ProblemConfig(os.path.join(simulation_dir, 'problems/C350_past.json'),
os.path.join(simulation_dir, 'planner_schedules/C350_planners_201710{0:02d}.json'.format(day)),
os.path.join(simulation_dir, 'solutions/second_stage_c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)),
os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)))
for day in range(1, 15, 1)]
solver_traces = read_traces(solver_log_file)
assert len(solver_traces) == len(problem_data)
results = []
include_vehicle_cost = False
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
def normalize_cost(value) -> float:
if isinstance(value, datetime.timedelta):
value_to_use = value.total_seconds()
elif isinstance(value, float) or isinstance(value, int):
value_to_use = value
else:
return float('inf')
return round(value_to_use / 3600, 2)
for solver_trace, problem_data in list(zip(solver_traces, problem_data)):
problem = rows.load.load_problem(os.path.join(simulation_dir, problem_data.ProblemPath))
human_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_data.HumanSolutionPath))
solver_second_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_data.SolverSecondSolutionPath))
solver_third_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_data.SolverThirdSolutionPath))
assert solver_second_schedule.date == human_schedule.date
assert solver_third_schedule.date == human_schedule.date
available_carers = problem.available_carers(human_schedule.date)
requested_visits = problem.requested_visits(human_schedule.date)
one_carer_visits = [visit for visit in requested_visits if visit.carer_count == 1]
two_carer_visits = [visit for visit in requested_visits if visit.carer_count == 2]
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(solver_third_schedule)
human_schedule_to_use = remove_violated_visits(human_schedule, solver_trace, problem, duration_estimator, distance_estimator)
solver_second_schedule_to_use = remove_violated_visits(solver_second_schedule, solver_trace, problem, duration_estimator,
distance_estimator)
solver_third_schedule_to_use = remove_violated_visits(solver_third_schedule, solver_trace, problem, duration_estimator,
distance_estimator)
human_cost = get_schedule_cost(human_schedule_to_use, solver_trace, problem, distance_estimator)
solver_second_cost = get_schedule_cost(solver_second_schedule_to_use, solver_trace, problem, distance_estimator)
solver_third_cost = get_schedule_cost(solver_third_schedule_to_use, solver_trace, problem, distance_estimator)
results.append(collections.OrderedDict(date=solver_trace.date,
day=solver_trace.date.day,
carers=len(available_carers),
one_carer_visits=len(one_carer_visits),
two_carer_visits=2 * len(two_carer_visits),
missed_visit_penalty=normalize_cost(solver_trace.missed_visit_penalty),
carer_used_penalty=normalize_cost(solver_trace.carer_used_penalty),
planner_missed_visits=human_cost.visits_missed,
solver_second_missed_visits=solver_second_cost.visits_missed,
solver_third_missed_visits=solver_third_cost.visits_missed,
planner_travel_time=normalize_cost(human_cost.travel_time),
solver_second_travel_time=normalize_cost(solver_second_cost.travel_time),
solver_third_travel_time=normalize_cost(solver_third_cost.travel_time),
planner_carers_used=human_cost.carers_used,
solver_second_carers_used=solver_second_cost.carers_used,
solver_third_carers_used=solver_third_cost.carers_used,
planner_total_cost=normalize_cost(human_cost.total_cost(include_vehicle_cost)),
solver_second_total_cost=normalize_cost(solver_second_cost.total_cost(include_vehicle_cost)),
solver_third_total_cost=normalize_cost(solver_third_cost.total_cost(include_vehicle_cost)),
solver_second_time=int(math.ceil(solver_trace.best_cost_time(2).total_seconds())),
solver_third_time=int(math.ceil(solver_trace.best_cost_time(3).total_seconds()))))
data_frame = pandas.DataFrame(data=results)
print(tabulate.tabulate(data_frame, tablefmt='psql', headers='keys'))
print(tabulate.tabulate(data_frame[['day', 'carers', 'one_carer_visits', 'two_carer_visits', 'missed_visit_penalty',
'planner_total_cost', 'solver_second_total_cost', 'solver_third_total_cost',
'planner_missed_visits', 'solver_second_missed_visits', 'solver_third_missed_visits',
'planner_travel_time', 'solver_second_travel_time', 'solver_third_travel_time', 'solver_second_time',
'solver_third_time']],
tablefmt='latex', headers='keys', showindex=False))
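# For every client, sums the time between consecutive visits during the day;
# clients with fewer than two timed visits are skipped.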
def get_consecutive_visit_time_span(schedule: rows.model.schedule.Schedule, start_time_estimator):
client_visits = collections.defaultdict(list)
for visit in schedule.visits:
client_visits[visit.visit.service_user].append(visit)
for client in client_visits:
visits = client_visits[client]
used_keys = set()
unique_visits = []
for visit in visits:
date_time = start_time_estimator(visit)
if date_time.hour == 0 and date_time.minute == 0:
continue
if visit.visit.key not in used_keys:
used_keys.add(visit.visit.key)
unique_visits.append(visit)
unique_visits.sort(key=start_time_estimator)
client_visits[client] = unique_visits
client_span = collections.defaultdict(datetime.timedelta)
for client in client_visits:
if len(client_visits[client]) < 2:
continue
last_visit = client_visits[client][0]
total_span = datetime.timedelta()
for next_visit in client_visits[client][1:]:
total_span += start_time_estimator(next_visit) - start_time_estimator(last_visit)
last_visit = next_visit
client_span[client] = total_span
return client_span
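# Counts how many times each carer served each client in the schedule.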
def get_carer_client_frequency(schedule: rows.model.schedule.Schedule):
client_assigned_carers = collections.defaultdict(collections.Counter)
for visit in schedule.visits:
client_assigned_carers[int(visit.visit.service_user)][int(visit.carer.sap_number)] += 1
return client_assigned_carers
def get_visits(problem: rows.model.problem.Problem, date: datetime.date):
visits = set()
for local_visits in problem.visits:
for visit in local_visits.visits:
if date != visit.date:
continue
visit.service_user = local_visits.service_user
visits.add(visit)
return visits
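# Extracts the distinct carer teams that served multiple-carer visits in the schedule.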
def get_teams(problem: rows.model.problem.Problem, schedule: rows.model.schedule.Schedule):
multiple_carer_visit_keys = set()
for visit in get_visits(problem, schedule.date):
if visit.carer_count > 1:
multiple_carer_visit_keys.add(visit.key)
client_visit_carers = collections.defaultdict(lambda: collections.defaultdict(list))
for visit in schedule.visits:
if visit.visit.key not in multiple_carer_visit_keys:
continue
client_visit_carers[visit.visit.service_user][visit.visit.key].append(int(visit.carer.sap_number))
for client in client_visit_carers:
for visit_key in client_visit_carers[client]:
client_visit_carers[client][visit_key].sort()
teams = set()
for client in client_visit_carers:
for visit_key in client_visit_carers[client]:
teams.add(tuple(client_visit_carers[client][visit_key]))
return teams
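# Compares qualitative properties of human and solver schedules for each simulated day:
# carers per client, span between consecutive visits, overtime and team composition.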
def compare_schedule_quality(args, settings):
ProblemConfig = collections.namedtuple('ProblemConfig', ['ProblemPath', 'HumanSolutionPath', 'SolverSolutionPath'])
def compare_quality(solver_trace, problem, human_schedule, solver_schedule, duration_estimator, distance_estimator):
visits = get_visits(problem, solver_trace.date)
multiple_carer_visit_keys = {visit.key for visit in visits if visit.carer_count > 1}
clients = list({int(visit.service_user) for visit in visits})
# number of different carers assigned throughout the day
human_carer_frequency = get_carer_client_frequency(human_schedule)
solver_carer_frequency = get_carer_client_frequency(solver_schedule)
def median_carer_frequency(client_counters):
total_counters = []
for client in client_counters:
# total_counters += len(client_counters[client])
total_counters.append(len(client_counters[client]))
# return total_counters / len(client_counters)
return numpy.median(total_counters)
human_schedule_squared = []
solver_schedule_squared = []
for client in clients:
if client in human_carer_frequency:
human_schedule_squared.append(sum(human_carer_frequency[client][carer] ** 2 for carer in human_carer_frequency[client]))
else:
human_schedule_squared.append(0)
if client in solver_carer_frequency:
solver_schedule_squared.append(sum(solver_carer_frequency[client][carer] ** 2 for carer in solver_carer_frequency[client]))
else:
solver_schedule_squared.append(0)
human_matching_dominates = 0
solver_matching_dominates = 0
for index in range(len(clients)):
if human_schedule_squared[index] > solver_schedule_squared[index]:
human_matching_dominates += 1
elif human_schedule_squared[index] < solver_schedule_squared[index]:
solver_matching_dominates += 1
matching_no_diff = len(clients) - human_matching_dominates - solver_matching_dominates
assert matching_no_diff >= 0
human_schedule_span = get_consecutive_visit_time_span(human_schedule, lambda visit: visit.check_in)
solver_schedule_span = get_consecutive_visit_time_span(solver_schedule, lambda visit: datetime.datetime.combine(visit.date, visit.time))
human_span_dominates = 0
solver_span_dominates = 0
for client in clients:
if human_schedule_span[client] > solver_schedule_span[client]:
human_span_dominates += 1
elif human_schedule_span[client] < solver_schedule_span[client]:
solver_span_dominates += 1
span_no_diff = len(clients) - human_span_dominates - solver_span_dominates
assert span_no_diff >= 0
human_teams = get_teams(problem, human_schedule)
solver_teams = get_teams(problem, solver_schedule)
human_schedule_frame = rows.plot.get_schedule_data_frame(human_schedule, problem, duration_estimator, distance_estimator)
solver_schedule_frame = rows.plot.get_schedule_data_frame(solver_schedule, problem, duration_estimator, distance_estimator)
human_visits = human_schedule_frame['Visits'].median()
solver_visits = solver_schedule_frame['Visits'].median()
human_total_overtime = compute_overtime(human_schedule_frame).sum()
solver_total_overtime = compute_overtime(solver_schedule_frame).sum()
return {'problem': str(human_schedule.date),
'visits': len(visits),
'clients': len(clients),
'human_overtime': human_total_overtime,
'solver_overtime': solver_total_overtime,
'human_visits_median': human_visits,
'solver_visits_median': solver_visits,
'human_visit_span_dominates': human_span_dominates,
'solver_visit_span_dominates': solver_span_dominates,
'visit_span_indifferent': span_no_diff,
'human_matching_dominates': human_matching_dominates,
'solver_matching_dominates': solver_matching_dominates,
'human_carer_frequency': median_carer_frequency(human_carer_frequency),
'solver_carer_frequency': median_carer_frequency(solver_carer_frequency),
'matching_indifferent': matching_no_diff,
'human_teams': len(human_teams),
'solver_teams': len(solver_teams)}
simulation_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations'
solver_log_file = os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5.err.log')
problem_data = [ProblemConfig(os.path.join(simulation_dir, 'problems/C350_past.json'),
os.path.join(simulation_dir, 'planner_schedules/C350_planners_201710{0:02d}.json'.format(day)),
os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)))
for day in range(1, 15, 1)]
solver_traces = read_traces(solver_log_file)
assert len(solver_traces) == len(problem_data)
results = []
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
for solver_trace, problem_data in zip(solver_traces, problem_data):
problem = rows.load.load_problem(os.path.join(simulation_dir, problem_data.ProblemPath))
human_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_data.HumanSolutionPath))
solver_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_data.SolverSolutionPath))
assert solver_trace.date == human_schedule.date
assert solver_trace.date == solver_schedule.date
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(solver_schedule)
human_schedule_to_use = remove_violated_visits(human_schedule, solver_trace, problem, duration_estimator, distance_estimator)
solver_schedule_to_use = remove_violated_visits(solver_schedule, solver_trace, problem, duration_estimator, distance_estimator)
row = compare_quality(solver_trace, problem, human_schedule_to_use, solver_schedule_to_use, duration_estimator, distance_estimator)
results.append(row)
data_frame = pandas.DataFrame(data=results)
data_frame['human_visit_span_dominates_rel'] = data_frame['human_visit_span_dominates'] / data_frame['clients']
data_frame['human_visit_span_dominates_rel_label'] = data_frame['human_visit_span_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['solver_visit_span_dominates_rel'] = data_frame['solver_visit_span_dominates'] / data_frame['clients']
data_frame['solver_visit_span_dominates_rel_label'] = data_frame['solver_visit_span_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['visit_span_indifferent_rel'] = data_frame['visit_span_indifferent'] / data_frame['clients']
data_frame['human_matching_dominates_rel'] = data_frame['human_matching_dominates'] / data_frame['clients']
data_frame['human_matching_dominates_rel_label'] = data_frame['human_matching_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['solver_matching_dominates_rel'] = data_frame['solver_matching_dominates'] / data_frame['clients']
data_frame['solver_matching_dominates_rel_label'] = data_frame['solver_matching_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['matching_indifferent_rel'] = data_frame['matching_indifferent'] / data_frame['clients']
data_frame['day'] = data_frame['problem'].apply(lambda label: datetime.datetime.strptime(label, '%Y-%m-%d').date().day)
data_frame['human_overtime_label'] = data_frame['human_overtime'].apply(get_time_delta_label)
data_frame['solver_overtime_label'] = data_frame['solver_overtime'].apply(get_time_delta_label)
print(tabulate.tabulate(data_frame, tablefmt='psql', headers='keys'))
print(tabulate.tabulate(data_frame[['day', 'human_visits_median', 'solver_visits_median', 'human_overtime_label', 'solver_overtime_label',
'human_carer_frequency', 'solver_carer_frequency',
'human_matching_dominates_rel_label', 'solver_matching_dominates_rel_label',
'human_teams', 'solver_teams']], tablefmt='latex', showindex=False, headers='keys'))
BenchmarkData = collections.namedtuple('BenchmarkData', ['BestCost', 'BestCostTime', 'BestBound', 'ComputationTime'])
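# Parses a MIP solver progress log (assumed to follow Gurobi's branch-and-bound log format)
# into a list of incumbent/bound events plus the final objective and bound.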
class MipTrace:
__MIP_HEADER_PATTERN = re.compile(r'^\s*Expl\s+Unexpl\s+|\s+Obj\s+Depth\s+IntInf\s+|\s+Incumbent\s+BestBd\s+Gap\s+|\s+It/Node\s+Time\s*$')
__MIP_LINE_PATTERN = re.compile(r'^(?P<solution_flag>[\w\*]?)\s*'
r'(?P<explored_nodes>\d+)\s+'
r'(?P<nodes_to_explore>\d+)\s+'
r'(?P<node_relaxation>[\w\.]*)\s+'
r'(?P<node_depth>\d*)\s+'
r'(?P<fractional_variables>\w*)\s+'
r'(?P<incumbent>[\d\.\-]*)\s+'
r'(?P<lower_bound>[\d\.\-]*)\s+'
r'(?P<gap>[\d\.\%\-]*)\s+'
r'(?P<simplex_it_per_node>[\d\.\-]*)\s+'
r'(?P<elapsed_time>\d+)s$')
__SUMMARY_PATTERN = re.compile(r'^Best\sobjective\s(?P<objective>[e\d\.\+]+),\s'
r'best\sbound\s(?P<bound>[e\d\.\+]+),\s'
r'gap\s(?P<gap>[e\d\.\+]+)\%$')
class MipProgressMessage:
def __init__(self, has_solution, best_cost, lower_bound, elapsed_time):
self.__has_solution = has_solution
self.__best_cost = best_cost
self.__lower_bound = lower_bound
self.__elapsed_time = elapsed_time
@property
def has_solution(self):
return self.__has_solution
@property
def best_cost(self):
return self.__best_cost
@property
def lower_bound(self):
return self.__lower_bound
@property
def elapsed_time(self):
return self.__elapsed_time
def __init__(self, best_objective: float, best_bound: float, events: typing.List[MipProgressMessage]):
self.__best_objective = best_objective
self.__best_bound = best_bound
self.__events = events
@staticmethod
def read_from_file(path) -> 'MipTrace':
events = []
best_objective = float('inf')
best_bound = float('-inf')
with open(path, 'r') as fp:
lines = fp.readlines()
lines_it = iter(lines)
for line in lines_it:
if re.match(MipTrace.__MIP_HEADER_PATTERN, line):
break
next(lines_it, None) # read the empty line
for line in lines_it:
line_match = re.match(MipTrace.__MIP_LINE_PATTERN, line)
if not line_match:
break
raw_solution_flag = line_match.group('solution_flag')
raw_incumbent = line_match.group('incumbent')
raw_lower_bound = line_match.group('lower_bound')
raw_elapsed_time = line_match.group('elapsed_time')
has_solution = raw_solution_flag == 'H' or raw_solution_flag == '*'
incumbent = float(raw_incumbent) if raw_incumbent and raw_incumbent != '-' else float('inf')
lower_bound = float(raw_lower_bound) if raw_lower_bound else float('-inf')
elapsed_time = datetime.timedelta(seconds=int(raw_elapsed_time)) if raw_elapsed_time else datetime.timedelta()
events.append(MipTrace.MipProgressMessage(has_solution, incumbent, lower_bound, elapsed_time))
next(lines_it, None)
for line in lines_it:
line_match = re.match(MipTrace.__SUMMARY_PATTERN, line)
if line_match:
raw_objective = line_match.group('objective')
if raw_objective:
best_objective = float(raw_objective)
raw_bound = line_match.group('bound')
if raw_bound:
best_bound = float(raw_bound)
return MipTrace(best_objective, best_bound, events)
def best_cost(self):
return self.__best_objective
def best_cost_time(self):
for event in reversed(self.__events):
if event.has_solution:
return event.elapsed_time
return datetime.timedelta.max
def best_bound(self):
return self.__best_bound
def computation_time(self):
if self.__events:
return self.__events[-1].elapsed_time
return datetime.timedelta.max
class DummyTrace:
def __init__(self):
pass
def best_cost(self):
return float('inf')
def best_bound(self):
return 0
def best_cost_time(self):
return datetime.timedelta(hours=23, minutes=59, seconds=59)
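# Builds LaTeX tables comparing the MIP solver with the two CP configurations (teams and time windows)
# on the 25- and 50-visit benchmark instances; missing or empty logs fall back to DummyTrace.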
def compare_benchmark_table(args, settings):
ProblemConfig = collections.namedtuple('ProblemConfig', ['ProblemPath', 'Carers', 'Visits', 'Visits2', 'MipSolutionLog',
'CpTeamSolutionLog',
'CpWindowsSolutionLog'])
simulation_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations'
old_simulation_dir = '/home/pmateusz/dev/cordia/simulations/review_simulations_old'
dummy_log = DummyTrace()
problem_configs = [ProblemConfig(os.path.join(simulation_dir, 'benchmark/25/problem_201710{0:02d}_v25m0c3.json'.format(day_number)),
3, 25, 0,
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m0c3_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m0c3.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m0c3.err.log'.format(day_number)))
for day_number in range(1, 15, 1)]
problem_configs.extend(
[ProblemConfig(os.path.join(simulation_dir, 'benchmark/25/problem_201710{0:02d}_v25m5c3.json'.format(day_number)),
3, 20, 5,
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m5c3_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_teams_v25m5c3.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_windows_v25m5c3.err.log'.format(day_number)))
for day_number in range(1, 15, 1)])
problem_configs.extend(
[ProblemConfig(os.path.join(simulation_dir, 'benchmark/50/problem_201710{0:02d}_v50m0c5.json'.format(day_number)),
5, 50, 0,
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m0c5_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m0c5.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m0c5.err.log'.format(day_number)))
for day_number in range(1, 15, 1)])
problem_configs.extend(
[ProblemConfig(os.path.join(simulation_dir, 'benchmark/50/problem_201710{0:02d}_v50m10c5.json'.format(day_number)),
5, 40, 10,
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m10c5_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_teams_v50m10c5.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_windows_v50m10c5.err.log'.format(day_number)))
for day_number in range(1, 15, 1)])
logs = []
for problem_config in problem_configs:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if os.path.exists(problem_config.CpTeamSolutionLog):
cp_team_logs = read_traces(problem_config.CpTeamSolutionLog)
if not cp_team_logs:
warnings.warn('File {0} is empty'.format(problem_config.CpTeamSolutionLog))
cp_team_log = dummy_log
else:
cp_team_log = cp_team_logs[0]
else:
cp_team_log = dummy_log
if os.path.exists(problem_config.CpWindowsSolutionLog):
cp_window_logs = read_traces(problem_config.CpWindowsSolutionLog)
if not cp_window_logs:
warnings.warn('File {0} is empty'.format(problem_config.CpWindowsSolutionLog))
cp_window_log = dummy_log
else:
cp_window_log = cp_window_logs[0]
else:
cp_window_log = dummy_log
if os.path.exists(problem_config.MipSolutionLog):
mip_log = MipTrace.read_from_file(problem_config.MipSolutionLog)
if not mip_log:
warnings.warn('File {0} is empty'.format(problem_config.MipSolutionLog))
mip_log = dummy_log
else:
mip_log = dummy_log
logs.append([problem_config, mip_log, cp_team_log, cp_window_log])
def get_gap(cost: float, lower_bound: float) -> float:
if lower_bound == 0.0:
return float('inf')
return (cost - lower_bound) * 100.0 / lower_bound
def get_delta(cost, cost_to_compare):
return (cost - cost_to_compare) * 100.0 / cost_to_compare
def get_computation_time_label(time: datetime.timedelta) -> str:
return str(time.total_seconds())
data = []
for problem_config, mip_log, cp_team_log, cp_window_log in logs:
data.append(collections.OrderedDict(
date=cp_team_log.date,
visits=problem_config.Visits,
visits_of_two=problem_config.Visits2,
carers=cp_team_log.carers,
penalty=cp_team_log.missed_visit_penalty,
lower_bound=mip_log.best_bound(),
mip_best_cost=mip_log.best_cost(),
mip_best_gap=get_gap(mip_log.best_cost(), mip_log.best_bound()),
mip_best_time=get_computation_time_label(mip_log.best_cost_time()),
team_best_cost=cp_team_log.best_cost(),
team_best_gap=get_gap(cp_team_log.best_cost(), mip_log.best_bound()),
team_best_delta=get_gap(cp_team_log.best_cost(), mip_log.best_cost()),
team_best_time=get_computation_time_label(cp_team_log.best_cost_time()),
windows_best_cost=cp_window_log.best_cost(),
windows_best_gap=get_gap(cp_window_log.best_cost(), mip_log.best_bound()),
windows_best_delta=get_gap(cp_window_log.best_cost(), mip_log.best_cost()),
windows_best_time=get_computation_time_label(cp_window_log.best_cost_time())))
data_frame = pandas.DataFrame(data=data)
def get_duration_label(time_delta: datetime.timedelta) -> str:
assert time_delta.days == 0
hours = int(time_delta.total_seconds() / 3600)
minutes = int(time_delta.total_seconds() / 60 - hours * 60)
seconds = int(time_delta.total_seconds() - 3600 * hours - 60 * minutes)
# return '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
return '{0:,.0f}'.format(time_delta.total_seconds())
def get_cost_label(cost: float) -> str:
return '{0:,.0f}'.format(cost)
def get_gap_label(gap: float) -> str:
return '{0:,.2f}'.format(gap)
def get_problem_label(problem, date: datetime.date):
label = '{0:2d} {1}'.format(date.day, problem.Visits)
if problem.Visits2 == 0:
return label
return label + '/' + str(problem.Visits2)
print_data = []
for problem_config, mip_log, cp_team_log, cp_window_log in logs:
best_cost = min([mip_log.best_cost(), cp_team_log.best_cost(), cp_window_log.best_cost()])
print_data.append(collections.OrderedDict(Problem=get_problem_label(problem_config, cp_team_log.date),
Penalty=get_cost_label(cp_team_log.missed_visit_penalty),
LB=get_cost_label(mip_log.best_bound()),
MIP_COST=get_cost_label(mip_log.best_cost()),
MIP_GAP=get_gap_label(get_gap(mip_log.best_cost(), mip_log.best_bound())),
MIP_DELTA=get_gap_label(get_delta(mip_log.best_cost(), best_cost)),
MIP_TIME=get_duration_label(mip_log.best_cost_time()),
TEAMS_GAP=get_gap_label(get_gap(cp_team_log.best_cost(), mip_log.best_bound())),
TEAMS_DELTA=get_gap_label(get_delta(cp_team_log.best_cost(), best_cost)),
TEAMS_COST=get_cost_label(cp_team_log.best_cost()),
TEAMS_Time=get_duration_label(cp_team_log.best_cost_time()),
WINDOWS_COST=get_cost_label(cp_window_log.best_cost()),
WINDOWS_GAP=get_gap_label(get_gap(cp_window_log.best_cost(), mip_log.best_bound())),
WINDOWS_DELTA=get_gap_label(get_delta(cp_window_log.best_cost(), best_cost)),
WINDOWS_TIME=get_duration_label(cp_window_log.best_cost_time())
))
data_frame = pandas.DataFrame(data=print_data)
print(tabulate.tabulate(
data_frame[['Problem', 'Penalty', 'LB', 'MIP_COST', 'MIP_TIME', 'TEAMS_COST', 'TEAMS_Time', 'WINDOWS_COST', 'WINDOWS_TIME']],
tablefmt='latex', headers='keys', showindex=False))
print(tabulate.tabulate(
data_frame[['Problem', 'MIP_GAP', 'MIP_DELTA', 'MIP_TIME', 'TEAMS_GAP', 'TEAMS_DELTA', 'TEAMS_Time', 'WINDOWS_GAP', 'WINDOWS_DELTA',
'WINDOWS_TIME']],
tablefmt='latex', headers='keys', showindex=False))
@functools.total_ordering
class ProblemMetadata:
WINDOW_LABELS = ['', 'F', 'S', 'M', 'L', 'A']
def __init__(self, case: int, visits: int, windows: int):
assert visits == 20 or visits == 50 or visits == 80
assert 0 <= windows < len(ProblemMetadata.WINDOW_LABELS)
self.__case = case
self.__visits = visits
self.__windows = windows
def __eq__(self, other) -> bool:
if isinstance(other, ProblemMetadata):
return self.case == other.case and self.visits == other.visits and self.__windows == other.windows
return False
def __ne__(self, other) -> bool:
return not (self == other)
def __lt__(self, other) -> bool:
assert isinstance(other, ProblemMetadata)
if self.windows != other.windows:
return self.windows < other.windows
if self.visits != other.visits:
return self.visits < other.visits
if self.case != other.case:
return self.case < other.case
return False
@property
def label(self) -> str:
return '{0:>2}{1}'.format(self.instance_number, self.windows_label)
@property
def windows(self) -> int:
return self.__windows
@property
def windows_label(self) -> str:
return ProblemMetadata.WINDOW_LABELS[self.__windows]
@property
def visits(self) -> int:
return self.__visits
@property
def case(self) -> int:
return self.__case
@property
def instance_number(self) -> int:
if self.__visits == 20:
return self.__case
if self.__visits == 50:
return 5 + self.__case
return 8 + self.__case
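# Compares the solver's results on literature benchmark instances with the best published results
# and prints a LaTeX summary table.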
def compare_literature_table(args, settings):
LIU2019 = 'liu2019'
AFIFI2016 = 'afifi2016'
DECERLE2018 = 'decerle2018'
GAYRAUD2015 = 'gayraud2015'
PARRAGH2018 = 'parragh2018'
BREDSTROM2008 = 'bredstrom2008combined'
BREDSTROM2007 = 'bredstrom2007branchandprice'
InstanceConfig = collections.namedtuple('InstanceConfig', ['name', 'nickname', 'result', 'who', 'is_optimal'])
instance_data = [
InstanceConfig(name='case_1_20_4_2_1', nickname='1N', result=5.13, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_1', nickname='2N', result=4.98, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_1', nickname='3N', result=5.19, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_1', nickname='4N', result=7.21, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_1', nickname='5N', result=5.37, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_1', nickname='6N', result=14.45, who=DECERLE2018, is_optimal=True),
InstanceConfig(name='case_2_50_10_5_1', nickname='7N', result=13.02, who=DECERLE2018, is_optimal=True),
InstanceConfig(name='case_3_50_10_5_1', nickname='8N', result=34.94, who=PARRAGH2018, is_optimal=True),
InstanceConfig(name='case_1_80_16_8_1', nickname='9N', result=43.48, who=PARRAGH2018, is_optimal=True),
InstanceConfig(name='case_2_80_16_8_1', nickname='10N', result=12.08, who=PARRAGH2018, is_optimal=True),
InstanceConfig(name='case_1_20_4_2_2', nickname='1S', result=3.55, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_2', nickname='2S', result=4.27, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_2', nickname='3S', result=3.63, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_2', nickname='4S', result=6.14, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_2', nickname='5S', result=3.93, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_2', nickname='6S', result=8.14, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_50_10_5_2', nickname='7S', result=8.39, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_3_50_10_5_2', nickname='8S', result=9.54, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_1_80_16_8_2', nickname='9S', result=11.93, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_2', nickname='10S', result=8.54, who=LIU2019, is_optimal=False),
InstanceConfig(name='case_1_20_4_2_3', nickname='1M', result=3.55, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_3', nickname='2M', result=3.58, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_3', nickname='3M', result=3.33, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_3', nickname='4M', result=5.67, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_3', nickname='5M', result=3.53, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_3', nickname='6M', result=7.7, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_2_50_10_5_3', nickname='7M', result=7.48, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_3_50_10_5_3', nickname='8M', result=8.54, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_80_16_8_3', nickname='9M', result=10.92, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_3', nickname='10M', result=7.62, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_1_20_4_2_4', nickname='1L', result=3.39, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_4', nickname='2L', result=3.42, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_4', nickname='3L', result=3.29, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_4', nickname='4L', result=5.13, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_4', nickname='5L', result=3.34, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_4', nickname='6L', result=7.14, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_50_10_5_4', nickname='7L', result=6.88, who=BREDSTROM2007, is_optimal=False),
InstanceConfig(name='case_3_50_10_5_4', nickname='8L', result=8, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_1_80_16_8_4', nickname='9L', result=10.43, who=LIU2019, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_4', nickname='10L', result=7.36, who=LIU2019, is_optimal=False),
InstanceConfig(name='case_1_20_4_2_5', nickname='1H', result=2.95, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_2_20_4_2_5', nickname='2H', result=2.88, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_3_20_4_2_5', nickname='3H', result=2.74, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_4_20_4_2_5', nickname='4H', result=4.29, who=GAYRAUD2015, is_optimal=False),
InstanceConfig(name='case_5_20_4_2_5', nickname='5H', result=2.81, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_1_50_10_5_5', nickname='6H', result=6.48, who=DECERLE2018, is_optimal=False),
InstanceConfig(name='case_2_50_10_5_5', nickname='7H', result=5.71, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_3_50_10_5_5', nickname='8H', result=6.52, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_1_80_16_8_5', nickname='9H', result=8.51, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_5', nickname='10H', result=6.31, who=PARRAGH2018, is_optimal=False)
]
instance_dirs = ['/home/pmateusz/dev/cordia/simulations/current_review_simulations/hc/solutions/case20',
'/home/pmateusz/dev/cordia/simulations/current_review_simulations/hc/solutions/case50',
'/home/pmateusz/dev/cordia/simulations/current_review_simulations/hc/solutions/case80']
instance_dict = {instance.name: instance for instance in instance_data}
print_data = []
instance_pattern = re.compile(r'case_(?P<case>\d+)_(?P<visits>\d+)_(?P<carers>\d+)_(?P<synchronized_visits>\d+)_(?P<windows>\d+)')
instance_counter = 1
last_visits = None
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
for instance_dir in instance_dirs:
for instance in instance_data:
instance_log_path = os.path.join(instance_dir, instance.name + '.dat.err.log')
if not os.path.exists(instance_log_path):
continue
solver_logs = read_traces(instance_log_path)
if not solver_logs:
continue
instance = instance_dict[instance.name]
name_match = instance_pattern.match(instance.name)
if not name_match:
continue
first_solver_logs = solver_logs[0]
case = int(name_match.group('case'))
visits = int(name_match.group('visits'))
carers = int(name_match.group('carers'))
synchronized_visits = int(name_match.group('synchronized_visits'))
windows_configuration = int(name_match.group('windows'))
problem_meta = ProblemMetadata(case, visits, windows_configuration)
if last_visits and last_visits != visits:
instance_counter = 1
normalized_result = float('inf')
if first_solver_logs.best_cost(3) < 100:
normalized_result = round(first_solver_logs.best_cost(3), 2)
delta = round((instance.result - normalized_result) / instance.result * 100, 2)
printable_literature_result = str(instance.result)
if instance.is_optimal:
printable_literature_result += '*'
printable_literature_result += 'cite{{{0}}}'.format(instance.who)
print_data.append(collections.OrderedDict(
metadata=problem_meta,
problem=problem_meta.label,
case=instance_counter,
v1=visits - 2 * synchronized_visits,
v2=synchronized_visits,
carers=carers,
time_windows=problem_meta.windows_label,
literature_result=printable_literature_result,
result=normalized_result,
delta=delta,
time=round(first_solver_logs.best_cost_time(3).total_seconds(), 2) if normalized_result != float('inf') else float('inf')
))
last_visits = visits
instance_counter += 1
print_data.sort(key=lambda dict_obj: dict_obj['metadata'])
print(tabulate.tabulate(
pandas.DataFrame(data=print_data)[['problem', 'carers', 'v1', 'v2', 'literature_result', 'result', 'time', 'delta']],
showindex=False,
tablefmt='latex', headers='keys'))
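# Produces box plots contrasting human planners with the algorithm on overtime, travel time,
# visit span, carer teams and client-carer matching, reading the metrics from a CSV file.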
def compare_planner_optimizer_quality(args, settings):
data_file = getattr(args, __FILE_ARG)
data_frame = pandas.read_csv(data_file)
figsize = (2.5, 5)
labels = ['Planners', 'Algorithm']
data_frame['travel_time'] = data_frame['Travel Time'].apply(parse_pandas_duration)
data_frame['span'] = data_frame['Span'].apply(parse_pandas_duration)
data_frame['overtime'] = data_frame['Overtime'].apply(parse_pandas_duration)
data_frame_planners = data_frame[data_frame['Type'] == 'Planners']
data_frame_solver = data_frame[data_frame['Type'] == 'Solver']
overtime_per_carer = [list((data_frame_planners['overtime'] / data_frame_planners['Carers']).values),
list((data_frame_solver['overtime'] / data_frame_solver['Carers']).values)]
def to_matplotlib_minutes(value):
return value * 60 * 1000000000
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(overtime_per_carer, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Overtime per Carer [HH:MM]')
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
ax.set_yticks([0, to_matplotlib_minutes(10), to_matplotlib_minutes(20), to_matplotlib_minutes(30)])
fig.tight_layout()
rows.plot.save_figure('quality_boxplot_overtime')
travel_time_per_carer = [list((data_frame_planners['travel_time'] / data_frame_planners['Carers']).values),
list((data_frame_solver['travel_time'] / data_frame_solver['Carers']).values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(travel_time_per_carer, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Travel Time per Carer [HH:MM]')
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
ax.set_yticks([0, to_matplotlib_minutes(30), to_matplotlib_minutes(60),
to_matplotlib_minutes(90), to_matplotlib_minutes(120)])
fig.tight_layout()
rows.plot.save_figure('quality_boxplot_travel_time')
span_per_client = [list((data_frame_planners['span'] / data_frame_planners['Clients']).values),
list((data_frame_solver['span'] / data_frame_solver['Clients']).values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(span_per_client, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Visit Span per Client [HH:MM]')
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
ax.set_yticks([0, to_matplotlib_minutes(6 * 60), to_matplotlib_minutes(7 * 60), to_matplotlib_minutes(8 * 60),
to_matplotlib_minutes(9 * 60)])
ax.set_ylim(bottom=to_matplotlib_minutes(6 * 60))
fig.tight_layout()
rows.plot.save_figure('quality_span')
teams = [list(data_frame_planners['Teams'].values), list(data_frame_solver['Teams'].values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(teams, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Teams of 2 Carers')
fig.tight_layout()
rows.plot.save_figure('quality_teams')
better_matching = [list(data_frame_planners['Better Matching'].values),
list(data_frame_solver['Better Matching'].values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(better_matching, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Better Client-Carer Matching')
fig.tight_layout()
rows.plot.save_figure('quality_matching')
def parse_percent(value):
value_to_use = value.replace('%', '')
return float(value_to_use) / 100.0
def parse_duration_seconds(value):
return datetime.timedelta(seconds=value)
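# Plots paired box plots of relative gap, relative cost difference and computation time
# for the multistage (CP) and IP (MIP) solvers on each benchmark configuration, read from a CSV file.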
def compare_benchmark(args, settings):
data_file_path = getattr(args, __FILE_ARG)
data_frame = pandas.read_csv(data_file_path)
data_frame['relative_cost_difference'] = data_frame['Relative Cost Difference'].apply(parse_percent)
data_frame['relative_gap'] = data_frame['Relative Gap'].apply(parse_percent)
data_frame['time'] = data_frame['Time'].apply(parse_duration_seconds)
matplotlib.rcParams.update({'font.size': 18})
labels = ['MS', 'IP']
low_labels = ['Gap', 'Delta', 'Time']
cp_frame = data_frame[data_frame['Solver'] == 'CP']
mip_frame = data_frame[data_frame['Solver'] == 'MIP']
def get_series(frame, configuration):
num_visits, num_visits_of_2 = configuration
filtered_frame = frame[(frame['Visits'] == num_visits) & (frame['Synchronized Visits'] == num_visits_of_2)]
return [filtered_frame['relative_gap'].values, filtered_frame['relative_cost_difference'].values,
filtered_frame['time'].values]
def seconds(value):
return value * 1000000000
def minutes(value):
return 60 * seconds(value)
def hours(value):
return 3600 * seconds(value)
limit_configurations = [[[None, minutes(1) + seconds(15)], [0, minutes(9)]],
[[None, minutes(1) + seconds(30)], [0, hours(4) + minutes(30)]],
[[0, minutes(3) + seconds(30)], [0, hours(4) + minutes(30)]],
[[0, minutes(3) + seconds(30)], [0, hours(4) + minutes(30)]]]
yticks_configurations = [
[[0, seconds(15), seconds(30), seconds(45), minutes(1)], [0, minutes(1), minutes(2), minutes(4), minutes(8)]],
[[0, seconds(15), seconds(30), seconds(45), minutes(1), minutes(1) + seconds(15)],
[0, hours(1), hours(2), hours(3), hours(4)]],
[[0, minutes(1), minutes(2), minutes(3)], [0, hours(1), hours(2), hours(3), hours(4)]],
[[0, minutes(1), minutes(2), minutes(3)], [0, hours(1), hours(2), hours(3), hours(4)]]]
problem_configurations = [(25, 0), (25, 5), (50, 0), (50, 10)]
def format_timedelta_pandas(x, pos=None):
if x < 0:
return None
time_delta = pandas.to_timedelta(x)
hours = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_HOUR)
minutes = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_MIN) - 60 * hours
seconds = int(time_delta.total_seconds() - 3600 * hours - 60 * minutes)
return '{0:01d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
def format_percent(x, pox=None):
return int(x * 100.0)
for index, problem_config in enumerate(problem_configurations):
fig, axes = matplotlib.pyplot.subplots(1, 2)
cp_gap, cp_delta, cp_time = get_series(cp_frame, problem_config)
mip_gap, mip_delta, mip_time = get_series(mip_frame, problem_config)
cp_time_limit, mip_time_limit = limit_configurations[index]
cp_yticks, mip_yticks = yticks_configurations[index]
cp_ax, mip_ax = axes
first_color_config = dict(flierprops=dict(marker='.'),
medianprops=dict(color=FOREGROUND_COLOR),
boxprops=dict(color=FOREGROUND_COLOR),
whiskerprops=dict(color=FOREGROUND_COLOR),
capprops=dict(color=FOREGROUND_COLOR))
second_color_config = dict(flierprops=dict(marker='.'),
medianprops=dict(color=FOREGROUND_COLOR2),
boxprops=dict(color=FOREGROUND_COLOR2),
whiskerprops=dict(color=FOREGROUND_COLOR2),
capprops=dict(color=FOREGROUND_COLOR2))
cp_ax.boxplot([cp_gap, cp_delta, []], **second_color_config)
cp_twinx = cp_ax.twinx()
cp_twinx.boxplot([[], [], cp_time], **first_color_config)
cp_twinx.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
cp_ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_percent))
cp_twinx.tick_params(axis='y', labelcolor=FOREGROUND_COLOR)
cp_ax.set_xlabel('Multistage')
cp_ax.set_xticklabels(low_labels, rotation=45)
cp_ax.set_ylim(bottom=-0.05, top=1)
cp_ax.set_ylabel('Delta, Gap [%]')
cp_twinx.set_ylim(bottom=cp_time_limit[0], top=cp_time_limit[1])
if cp_yticks:
cp_twinx.set_yticks(cp_yticks)
mip_ax.boxplot([mip_gap, mip_delta, []], **second_color_config)
mip_twinx = mip_ax.twinx()
mip_twinx.boxplot([[], [], mip_time], **first_color_config)
mip_twinx.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
mip_twinx.tick_params(axis='y', labelcolor=FOREGROUND_COLOR)
mip_ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_percent))
mip_ax.set_xlabel('IP')
mip_ax.set_xticklabels(low_labels, rotation=45)
mip_ax.set_ylim(bottom=-0.05, top=1)
mip_twinx.set_ylabel('Computation Time [H:MM:SS]', color=FOREGROUND_COLOR)
mip_twinx.set_ylim(bottom=mip_time_limit[0], top=mip_time_limit[1])
if mip_yticks:
mip_twinx.set_yticks(mip_yticks)
fig.tight_layout(w_pad=0.0)
rows.plot.save_figure('benchmark_boxplot_{0}_{1}'.format(problem_config[0], problem_config[1]))
matplotlib.pyplot.cla()
matplotlib.pyplot.close(fig)
def old_debug(args, settings):
problem = rows.plot.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
solution_file = get_or_raise(args, __SOLUTION_FILE_ARG)
schedule = rows.plot.load_schedule(solution_file)
schedule_date = schedule.metadata.begin
carer_dairies = {
carer_shift.carer.sap_number:
next((diary for diary in carer_shift.diaries if diary.date == schedule_date), None)
for carer_shift in problem.carers}
location_finder = rows.location_finder.UserLocationFinder(settings)
location_finder.reload()
data_set = []
with rows.plot.create_routing_session() as session:
for route in schedule.routes():
travel_time = datetime.timedelta()
for source, destination in route.edges():
source_loc = location_finder.find(source.visit.service_user)
if not source_loc:
logging.error('Failed to resolve location of %s', source.visit.service_user)
continue
destination_loc = location_finder.find(destination.visit.service_user)
if not destination_loc:
logging.error('Failed to resolve location of %s', destination.visit.service_user)
continue
distance = session.distance(source_loc, destination_loc)
if distance is None:
logging.error('Distance cannot be estimated between %s and %s', source_loc, destination_loc)
continue
travel_time += datetime.timedelta(seconds=distance)
service_time = datetime.timedelta()
for visit in route.visits:
if visit.check_in and visit.check_out:
observed_duration = visit.check_out - visit.check_in
if observed_duration.days < 0:
logging.error('Observed duration %s is negative', observed_duration)
service_time += observed_duration
else:
logging.warning(
'Visit %s is not supplied with information on check-in and check-out information',
visit.key)
service_time += visit.duration
available_time = functools.reduce(operator.add, (event.duration
for event in
carer_dairies[route.carer.sap_number].events))
data_set.append([route.carer.sap_number,
available_time,
service_time,
travel_time,
float(service_time.total_seconds() + travel_time.total_seconds())
/ available_time.total_seconds()])
data_set.sort(key=operator.itemgetter(4))
data_frame = pandas.DataFrame(columns=['Carer', 'Availability', 'Service', 'Travel', 'Usage'], data=data_set)
figure, axis = matplotlib.pyplot.subplots()
indices = numpy.arange(len(data_frame.index))
time_delta_converter = rows.plot.TimeDeltaConverter()
width = 0.35
travel_series = numpy.array(time_delta_converter(data_frame.Travel))
service_series = numpy.array(time_delta_converter(data_frame.Service))
idle_overtime_series = list(data_frame.Availability - data_frame.Travel - data_frame.Service)
idle_series = numpy.array(time_delta_converter(
map(lambda value: value if value.days >= 0 else datetime.timedelta(), idle_overtime_series)))
overtime_series = numpy.array(time_delta_converter(
map(lambda value: datetime.timedelta(
seconds=abs(value.total_seconds())) if value.days < 0 else datetime.timedelta(), idle_overtime_series)))
service_handle = axis.bar(indices, service_series, width, bottom=time_delta_converter.zero)
travel_handle = axis.bar(indices, travel_series, width,
bottom=service_series + time_delta_converter.zero_num)
idle_handle = axis.bar(indices, idle_series, width,
bottom=service_series + travel_series + time_delta_converter.zero_num)
overtime_handle = axis.bar(indices, overtime_series, width,
bottom=idle_series + service_series + travel_series + time_delta_converter.zero_num)
axis.yaxis_date()
axis.yaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M:%S"))
axis.legend((travel_handle, service_handle, idle_handle, overtime_handle),
('Travel', 'Service', 'Idle', 'Overtime'), loc='upper right')
matplotlib.pyplot.show()
def show_working_hours(args, settings):
__WIDTH = 0.25
color_map = matplotlib.cm.get_cmap('tab20')
matplotlib.pyplot.set_cmap(color_map)
shift_file = get_or_raise(args, __FILE_ARG)
shift_file_base_name, shift_file_ext = os.path.splitext(os.path.basename(shift_file))
output_file_base_name = getattr(args, __OUTPUT, shift_file_base_name)
__EVENT_TYPE_OFFSET = {'assumed': 2, 'contract': 1, 'work': 0}
__EVENT_TYPE_COLOR = {'assumed': color_map.colors[0], 'contract': color_map.colors[4], 'work': color_map.colors[2]}
handles = {}
frame = pandas.read_csv(shift_file)
dates = frame['day'].unique()
for current_date in dates:
frame_to_use = frame[frame['day'] == current_date]
carers = frame_to_use['carer'].unique()
figure, axis = matplotlib.pyplot.subplots()
try:
current_date_to_use = datetime.datetime.strptime(current_date, '%Y-%m-%d')
carer_index = 0
for carer in carers:
carer_frame = frame_to_use[frame_to_use['carer'] == carer]
axis.bar(carer_index + 0.25, 24 * 3600, 0.75, bottom=0, color='grey', alpha=0.3)
for index, row in carer_frame.iterrows():
event_begin = datetime.datetime.strptime(row['begin'], '%Y-%m-%d %H:%M:%S')
event_end = datetime.datetime.strptime(row['end'], '%Y-%m-%d %H:%M:%S')
handle = axis.bar(carer_index + __EVENT_TYPE_OFFSET[row['event type']] * __WIDTH,
(event_end - event_begin).total_seconds(),
__WIDTH,
bottom=(event_begin - current_date_to_use).total_seconds(),
color=__EVENT_TYPE_COLOR[row['event type']])
handles[row['event type']] = handle
carer_index += 1
axis.legend([handles['work'], handles['contract'], handles['assumed']],
['Worked', 'Available', 'Forecast'], loc='upper right')
axis.grid(linestyle='dashed')
axis.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_time))
axis.yaxis.set_ticks(numpy.arange(0, 24 * 3600, 2 * 3600))
axis.set_ylim(6 * 3600, 24 * 60 * 60)
rows.plot.save_figure(output_file_base_name + '_' + current_date)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def compute_overtime(frame):
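    # Overtime is the negative part of (availability - travel - service) per
    # carer; the positive part is idle time and is not returned here.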
idle_overtime_series = list(frame.Availability - frame.Travel - frame.Service)
idle_series = numpy.array(
list(map(lambda value: value if value.days >= 0 else datetime.timedelta(), idle_overtime_series)))
overtime_series = numpy.array(list(map(lambda value: datetime.timedelta(
seconds=abs(value.total_seconds())) if value.days < 0 else datetime.timedelta(), idle_overtime_series)))
return overtime_series
class Node:
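    """A single stop in a carer's route.

    Wraps a visit together with its position in the route (index and next),
    the admissible start-time window, the break recorded around this stop and
    the travel time towards the next stop.
    """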
def __init__(self,
index: int,
next: int,
visit: rows.model.visit.Visit,
visit_start_min: datetime.datetime,
visit_start_max: datetime.datetime,
break_start: typing.Optional[datetime.datetime],
break_duration: datetime.timedelta,
travel_duration: datetime.timedelta):
self.__index = index
self.__next = next
self.__visit = visit
self.__visit_start_min = visit_start_min
self.__visit_start_max = visit_start_max
self.__break_start = break_start
self.__break_duration = break_duration
self.__travel_duration = travel_duration
@property
def index(self) -> int:
return self.__index
@property
def next(self) -> int:
return self.__next
@property
def visit_key(self) -> int:
return self.__visit.key
@property
def visit_start(self) -> datetime.datetime:
return datetime.datetime.combine(self.__visit.date, self.__visit.time)
@property
def visit_start_min(self) -> datetime.datetime:
return self.__visit_start_min
@property
def visit_start_max(self) -> datetime.datetime:
return self.__visit_start_max
@property
def carer_count(self) -> int:
return self.__visit.carer_count
@property
def visit_duration(self) -> datetime.timedelta:
return self.__visit.duration
@property
def break_start(self) -> datetime.datetime:
return self.__break_start
@property
def break_duration(self) -> datetime.timedelta:
return self.__break_duration
@property
def travel_duration(self) -> datetime.timedelta:
return self.__travel_duration
@property
def service_user(self) -> str:
return self.__visit.service_user
class Mapping:
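    """Indexes the visits of a schedule for delay propagation.

    Builds one Node per scheduled visit, groups the nodes into per-carer
    routes, pairs sibling nodes (multiple-carer visits of the same service
    user with identical time windows) and exposes the precedence relations
    between nodes as a directed graph.
    """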
def __init__(self, routes, problem, settings, time_window_span):
self.__index_to_node = {}
user_tag_finder = rows.location_finder.UserLocationFinder(settings)
user_tag_finder.reload()
local_routes = {}
current_index = 0
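        # Match a past visit back to its planned counterpart in the problem:
        # prefer an exact key match; otherwise pick the visit of the same
        # service user, date and tasks whose planned time is closest and
        # within the allowed time window span.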
def find_visit(item) -> rows.model.visit.Visit:
current_diff = sys.maxsize
visit_match = None
for visit_batch in problem.visits:
if visit_batch.service_user != item.service_user:
continue
for visit in visit_batch.visits:
if visit.date != item.date or visit.tasks != item.tasks:
continue
if item.key == visit.key:
# exact match
return visit
visit_total_time = visit.time.hour * 3600 + visit.time.minute * 60
item_total_time = item.time.hour * 3600 + item.time.minute * 60
diff_total_time = abs(visit_total_time - item_total_time)
if diff_total_time <= time_window_span.total_seconds() and diff_total_time < current_diff:
visit_match = visit
current_diff = diff_total_time
assert visit_match is not None
return visit_match
current_index = 0
with rows.plot.create_routing_session() as routing_session:
for route in routes:
local_route = []
previous_visit = None
previous_index = None
current_visit = None
break_start = None
break_duration = datetime.timedelta()
for item in route.nodes:
if isinstance(item, rows.model.past_visit.PastVisit):
current_visit = item.visit
if previous_visit is None:
if break_start is None:
diary = problem.get_diary(route.carer, current_visit.date)
break_start = diary.events[0].begin - datetime.timedelta(minutes=30)
node = Node(current_index,
current_index + 1,
rows.model.visit.Visit(date=current_visit.date,
time=break_start,
duration=datetime.timedelta(),
service_user=current_visit.service_user),
break_start,
break_start,
break_start,
break_duration,
datetime.timedelta())
self.__index_to_node[current_index] = node
local_route.append(node)
current_index += 1
previous_visit = current_visit
previous_index = current_index
break_start = None
break_duration = datetime.timedelta()
current_index += 1
continue
previous_location = user_tag_finder.find(previous_visit.service_user)
current_location = user_tag_finder.find(current_visit.service_user)
travel_time = datetime.timedelta(seconds=routing_session.distance(previous_location, current_location))
previous_visit_match = find_visit(previous_visit)
node = Node(previous_index,
current_index,
previous_visit,
previous_visit_match.datetime - time_window_span,
previous_visit_match.datetime + time_window_span,
break_start,
break_duration,
travel_time)
self.__index_to_node[previous_index] = node
local_route.append(node)
break_start = None
break_duration = datetime.timedelta()
previous_visit = current_visit
previous_index = current_index
current_index += 1
if isinstance(item, rows.model.rest.Rest):
if break_start is None:
break_start = item.start_time
else:
break_start = item.start_time - break_duration
break_duration += item.duration
visit_match = find_visit(previous_visit)
node = Node(previous_index,
-1,
previous_visit,
visit_match.datetime - time_window_span,
visit_match.datetime + time_window_span,
break_start,
break_duration,
datetime.timedelta())
self.__index_to_node[previous_index] = node
local_route.append(node)
local_routes[route.carer] = local_route
self.__routes = local_routes
service_user_to_index = collections.defaultdict(list)
for index in self.__index_to_node:
node = self.__index_to_node[index]
service_user_to_index[node.service_user].append(index)
self.__siblings = {}
for service_user in service_user_to_index:
num_indices = len(service_user_to_index[service_user])
for left_pos in range(num_indices):
left_index = service_user_to_index[service_user][left_pos]
left_visit = self.__index_to_node[left_index]
if left_visit.carer_count == 1:
continue
for right_pos in range(left_pos + 1, num_indices):
right_index = service_user_to_index[service_user][right_pos]
right_visit = self.__index_to_node[right_index]
if right_visit.carer_count == 1:
continue
if left_visit.visit_start_min == right_visit.visit_start_min and left_visit.visit_start_max == right_visit.visit_start_max:
assert left_index != right_index
self.__siblings[left_index] = right_index
self.__siblings[right_index] = left_index
def indices(self):
return list(self.__index_to_node.keys())
def routes(self) -> typing.Dict[rows.model.carer.Carer, typing.List[Node]]:
return self.__routes
def node(self, index: int) -> Node:
return self.__index_to_node[index]
    def find_index(self, visit_key: int) -> typing.Optional[int]:
for index in self.__index_to_node:
if self.__index_to_node[index].visit_key == visit_key:
return index
return None
def sibling(self, index: int) -> typing.Optional[Node]:
if index in self.__siblings:
sibling_index = self.__siblings[index]
return self.__index_to_node[sibling_index]
return None
def graph(self) -> networkx.DiGraph:
edges = []
for carer in self.__routes:
for node in self.__routes[carer]:
if node.next != -1:
assert node.index != node.next
edges.append([node.index, node.next])
sibling_node = self.sibling(node.index)
if sibling_node is not None:
if node.index < sibling_node.index:
assert node.index != sibling_node.index
edges.append([node.index, sibling_node.index])
if node.next != -1:
assert sibling_node.index != node.next
edges.append([sibling_node.index, node.next])
return networkx.DiGraph(edges)
def create_mapping(settings, problem, schedule) -> Mapping:
mapping_time_windows_span = datetime.timedelta(minutes=90)
return Mapping(schedule.routes, problem, settings, mapping_time_windows_span)
class StartTimeEvaluator:
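    """Propagates visit start times along the schedule's precedence graph.

    Nodes are processed in topological order: sibling (multiple-carer) visits
    are synchronised to the later start time and every successor starts no
    earlier than its predecessor's finish plus break and travel time.
    """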
def __init__(self, mapping: Mapping, problem: rows.model.problem.Problem, schedule: rows.model.schedule.Schedule):
self.__mapping = mapping
self.__problem = problem
self.__schedule = schedule
self.__sorted_indices = list(networkx.topological_sort(self.__mapping.graph()))
self.__initial_start_times = self.__get_initial_start_times()
def get_start_times(self, duration_callback) -> typing.List[datetime.datetime]:
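        # duration_callback maps a visit key to the visit duration used in
        # this pass (for example, a sampled duration for one scenario).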
start_times = copy.copy(self.__initial_start_times)
for index in self.__sorted_indices:
node = self.__mapping.node(index)
current_sibling_node = self.__mapping.sibling(node.index)
if current_sibling_node:
max_start_time = max(start_times[node.index], start_times[current_sibling_node.index])
start_times[node.index] = max_start_time
if max_start_time > start_times[current_sibling_node.index]:
start_times[current_sibling_node.index] = max_start_time
if current_sibling_node.next is not None and current_sibling_node.next != -1:
start_times[current_sibling_node.next] = self.__get_next_arrival(current_sibling_node, start_times, duration_callback)
if node.next is None or node.next == -1:
continue
next_arrival = self.__get_next_arrival(node, start_times, duration_callback)
if next_arrival > start_times[node.next]:
start_times[node.next] = next_arrival
return start_times
def get_delays(self, start_times: typing.List[datetime.datetime]) -> typing.List[datetime.timedelta]:
return [start_times[index] - self.__mapping.node(index).visit_start_max for index in self.__mapping.indices()]
def __get_next_arrival(self, local_node: Node, start_times, duration_callback) -> datetime.datetime:
break_done = False
if local_node.break_duration is not None \
and local_node.break_start is not None \
and local_node.break_start + local_node.break_duration <= start_times[local_node.index]:
break_done = True
local_visit_key = self.__mapping.node(local_node.index).visit_key
local_next_arrival = start_times[local_node.index] + duration_callback(local_visit_key) + local_node.travel_duration
if not break_done and local_node.break_start is not None:
if local_next_arrival >= local_node.break_start:
local_next_arrival += local_node.break_duration
else:
local_next_arrival = local_node.break_start + local_node.break_duration
return local_next_arrival
def __get_initial_start_times(self) -> typing.List[datetime.datetime]:
start_times = [self.__mapping.node(index).visit_start_min for index in self.__mapping.indices()]
carer_routes = self.__mapping.routes()
for carer in carer_routes:
diary = self.__problem.get_diary(carer, self.__schedule.date)
assert diary is not None
nodes = carer_routes[carer]
nodes_it = iter(nodes)
first_visit_node = next(nodes_it)
start_min = max(first_visit_node.visit_start_min, diary.events[0].begin - datetime.timedelta(minutes=30))
start_times[first_visit_node.index] = start_min
for node in nodes_it:
start_min = max(node.visit_start_min, diary.events[0].begin - datetime.timedelta(minutes=30))
start_times[node.index] = start_min
return start_times
class EssentialRiskinessEvaluator:
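    """Monte-Carlo evaluation of delay risk for a schedule.

    For every scenario sampled from the visit-duration history the start
    times are propagated through the schedule and the per-visit delays are
    stored, from which a riskiness index can be computed for each visit.
    """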
def __init__(self, settings, history, problem, schedule):
self.__settings = settings
self.__history = history
self.__problem = problem
self.__schedule = schedule
self.__schedule_start = datetime.datetime.combine(self.__schedule.date, datetime.time())
self.__mapping = None
self.__sample = None
self.__start_times = None
self.__delay = None
def run(self):
self.__mapping = create_mapping(self.__settings, self.__problem, self.__schedule)
history_time_windows_span = datetime.timedelta(hours=2)
self.__sample = self.__history.build_sample(self.__problem, self.__schedule.date, history_time_windows_span)
self.__start_times = [[datetime.datetime.max for _ in range(self.__sample.size)] for _ in self.__mapping.indices()]
self.__delay = [[datetime.timedelta.max for _ in range(self.__sample.size)] for _ in self.__mapping.indices()]
start_time_evaluator = StartTimeEvaluator(self.__mapping, self.__problem, self.__schedule)
for scenario in range(self.__sample.size):
def get_visit_duration(visit_key: int) -> datetime.timedelta:
if visit_key is None:
return datetime.timedelta()
return self.__sample.visit_duration(visit_key, scenario)
scenario_start_times = start_time_evaluator.get_start_times(get_visit_duration)
delay = start_time_evaluator.get_delays(scenario_start_times)
for index in range(len(scenario_start_times)):
self.__start_times[index][scenario] = scenario_start_times[index]
self.__delay[index][scenario] = delay[index]
def calculate_index(self, visit_key: int) -> float:
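        # Riskiness index (roughly): sort the scenario delays, accumulate the
        # total positive delay and check whether the slack of the remaining
        # scenarios can offset it; the returned value is the uniform
        # per-scenario slack needed to balance the delay, 0 if no scenario is
        # delayed and infinity if the delay cannot be offset at all.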
visit_index = self.__find_index(visit_key)
records = [local_delay.total_seconds() for local_delay in self.__delay[visit_index]]
records.sort()
num_records = len(records)
if records[num_records - 1] <= 0:
return 0.0
total_delay = 0.0
position = num_records - 1
while position >= 0 and records[position] >= 0:
total_delay += records[position]
position -= 1
if position == -1:
return float('inf')
delay_budget = 0
while position > 0 and delay_budget + float(position + 1) * records[position] + total_delay > 0:
delay_budget += records[position]
position -= 1
delay_balance = delay_budget + float(position + 1) * records[position] + total_delay
if delay_balance < 0:
riskiness_index = min(0.0, records[position + 1])
assert riskiness_index <= 0.0
remaining_balance = total_delay + delay_budget + float(position + 1) * riskiness_index
assert remaining_balance >= 0.0
riskiness_index -= math.ceil(remaining_balance / float(position + 1))
assert riskiness_index * float(position + 1) + delay_budget + total_delay <= 0.0
return -riskiness_index
elif delay_balance > 0:
return float('inf')
else:
return records[position]
def get_delays(self, visit_key) -> typing.List[datetime.timedelta]:
index = self.__find_index(visit_key)
return self.__delay[index]
def find_carer(self, visit_key: int) -> typing.Optional[rows.model.carer.Carer]:
for carer in self.__mapping.routes():
for node in self.__mapping.routes()[carer]:
if node.visit_key == visit_key:
return carer
return None
def find_route(self, index: int) -> typing.Optional[typing.List[Node]]:
routes = self.__mapping.routes()
for carer in routes:
for node in routes[carer]:
if node.index == index:
return routes[carer]
return None
def print_route_for_visit(self, visit_key):
carer = self.find_carer(visit_key)
self.print_route(carer)
def print_route(self, carer):
route = self.__mapping.routes()[carer]
data = [['index', 'key', 'visit_start', 'visit_duration', 'travel_duration', 'break_start', 'break_duration']]
for node in route:
if node.visit_key is None:
duration = 0
else:
duration = int(self.__sample.visit_duration(node.visit_key, 0).total_seconds())
data.append([node.index,
node.visit_key,
int(self.__datetime_to_delta(self.__start_times[node.index][0]).total_seconds()),
duration,
int(node.travel_duration.total_seconds()),
int(self.__datetime_to_delta(node.break_start).total_seconds()) if node.break_start is not None else 0,
int(node.break_duration.total_seconds())])
print(tabulate.tabulate(data))
def print_start_times(self, visit_key: int):
print('Start Times - Visit {0}:'.format(visit_key))
selected_index = self.__find_index(visit_key)
for scenario_number in range(self.__sample.size):
print('{0:<4}{1}'.format(scenario_number,
int(self.__datetime_to_delta(self.__start_times[selected_index][scenario_number]).total_seconds())))
def print_delays(self, visit_key: int):
print('Delays - Visit {0}:'.format(visit_key))
selected_index = self.__find_index(visit_key)
for scenario_number in range(self.__sample.size):
print('{0:<4}{1}'.format(scenario_number, int(self.__delay[selected_index][scenario_number].total_seconds())))
def visit_keys(self) -> typing.List[int]:
visit_keys = [self.__mapping.node(index).visit_key for index in self.__mapping.indices() if self.__mapping.node(index).visit_key is not None]
visit_keys.sort()
return visit_keys
def __find_index(self, visit_key: int) -> typing.Optional[int]:
for index in self.__mapping.indices():
if self.__mapping.node(index).visit_key == visit_key:
return index
return None
def __datetime_to_delta(self, value: datetime.datetime) -> datetime.timedelta:
return value - self.__schedule_start
def to_frame(self):
records = []
for visit_index in self.__mapping.indices():
visit_key = self.__mapping.node(visit_index).visit_key
if visit_key is None:
continue
for scenario_number in range(self.__sample.size):
records.append({'visit': visit_key, 'scenario': scenario_number, 'delay': self.__delay[visit_index][scenario_number]})
return pandas.DataFrame(data=records)
@property
def mapping(self):
return self.__mapping
@property
def start_times(self):
return self.__start_times
@property
def delay(self):
return self.__delay
@staticmethod
def time_to_delta(time: datetime.time) -> datetime.timedelta:
seconds = time.hour * 3600 + time.minute * 60 + time.second
return datetime.timedelta(seconds=seconds)
def load_history():
root_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations/problems'
cached_path = os.path.join(root_dir, 'C350_history.pickle')
path = os.path.join(root_dir, 'C350_history.json')
import pickle
if os.path.exists(cached_path):
with open(cached_path, 'rb') as input_stream:
return pickle.load(input_stream)
with open(path, 'r') as input_stream:
history = rows.model.history.History.load(input_stream)
with open(cached_path, 'wb') as output_stream:
pickle.dump(history, output_stream)
return history
def compare_delay(args, settings):
compare_delay_visits_path = 'compare_delay_visits.hdf'
compare_instances_path = 'compare_instances.hdf'
def load_data():
root_problem_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations/solutions'
problem = rows.load.load_problem('/home/pmateusz/dev/cordia/simulations/current_review_simulations/problems/C350_past.json')
history = load_history()
cost_schedules \
= [rows.load.load_schedule(os.path.join(root_problem_dir, 'c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)))
for day in range(1, 15)]
cost_traces = read_traces(os.path.join(root_problem_dir, 'c350past_distv90b90e30m1m1m5.err.log'))
risk_schedules = [rows.load.load_schedule(os.path.join(root_problem_dir, 'c350past_riskv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)))
for day in range(1, 15)]
risk_traces = read_traces(os.path.join(root_problem_dir, 'c350past_riskv90b90e30m1m1m5.err.log'))
return problem, history, cost_schedules, cost_traces, risk_schedules, risk_traces
def get_visit_duration(visit_key: int) -> datetime.timedelta:
if visit_key is None:
return datetime.timedelta()
visit = history.get_visit(visit_key)
assert visit is not None
return visit.real_duration
def get_visit_delays(schedule: rows.model.schedule.Schedule) -> typing.Dict[int, datetime.timedelta]:
mapping = create_mapping(settings, problem, schedule)
delay_evaluator = StartTimeEvaluator(mapping, problem, schedule)
start_times = delay_evaluator.get_start_times(get_visit_duration)
delays = delay_evaluator.get_delays(start_times)
return {mapping.node(index).visit_key: delays[index] for index in range(len(delays))}
problem = None
if os.path.exists(compare_delay_visits_path):
visits_frame = pandas.read_hdf(compare_delay_visits_path)
else:
problem, history, cost_schedules, cost_traces, risk_schedules, risk_traces = load_data()
visit_data_set = []
for index in range(len(cost_schedules)):
cost_schedule = cost_schedules[index]
risk_schedule = risk_schedules[index]
schedule_date = cost_schedule.date
assert schedule_date == risk_schedule.date
cost_visit_delays = get_visit_delays(cost_schedule)
risk_visit_delays = get_visit_delays(risk_schedule)
visit_keys = set(cost_visit_delays.keys())
for visit_key in risk_visit_delays:
visit_keys.add(visit_key)
for visit_key in visit_keys:
record = collections.OrderedDict(visit_key=visit_key, date=schedule_date)
if visit_key in cost_visit_delays:
record['cost_delay'] = cost_visit_delays[visit_key]
if visit_key in risk_visit_delays:
record['risk_delay'] = risk_visit_delays[visit_key]
visit_data_set.append(record)
visits_frame = pandas.DataFrame(data=visit_data_set)
visits_frame.to_hdf(compare_delay_visits_path, key='a')
if os.path.exists(compare_instances_path):
instances_frame = pandas.read_hdf(compare_instances_path)
else:
if problem is None:
problem, history, cost_schedules, cost_traces, risk_schedules, risk_traces = load_data()
instances_data_set = []
for index in range(len(cost_schedules)):
cost_schedule = cost_schedules[index]
cost_trace = cost_traces[index]
risk_schedule = risk_schedules[index]
risk_trace = risk_traces[index]
schedule_date = cost_schedule.date
local_visits_delay_frame = (visits_frame[visits_frame['date'] == schedule_date])[['risk_delay', 'cost_delay']]
instances_data_set.append(collections.OrderedDict(date=schedule_date,
cost_cost=cost_trace.best_cost(),
cost_num_visits=len(cost_schedule.visits),
cost_mean_delay=local_visits_delay_frame.mean().loc['cost_delay'],
risk_cost=risk_trace.last_cost(),
risk_num_visits=len(risk_schedule.visits),
risk_mean_delay=local_visits_delay_frame.mean().loc['risk_delay']))
instances_frame = pandas.DataFrame(data=instances_data_set)
instances_frame.to_hdf(compare_instances_path, key='a')
def compute_riskiness(args, settings):
# load optimised schedules
# load riskiness schedules
schedule = rows.load.load_schedule('/home/pmateusz/dev/cordia/simulations/current_review_simulations/cp_schedules/riskiness/2017-10-01.gexf')
problem = rows.load.load_problem('/home/pmateusz/dev/cordia/simulations/current_review_simulations/problems/C350_past.json')
with open('/home/pmateusz/dev/cordia/simulations/current_review_simulations/problems/C350_history.json', 'r') as input_stream:
history = rows.model.history.History.load(input_stream)
riskiness_evaluator = EssentialRiskinessEvaluator(settings, history, problem, schedule)
riskiness_evaluator.run()
selected_carers = {riskiness_evaluator.find_carer(8582722)}
for carer in selected_carers:
riskiness_evaluator.print_route(carer)
class DelayRecords:
def __init__(self, objective, instance, delays):
self.__objective = objective
self.__instance = instance
self.__delays = delays
def load_third_stage_records(settings):
root_problem_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations/solutions'
frame_path = os.path.join(root_problem_dir, 'delay_results.hdf')
if os.path.exists(frame_path):
return pandas.read_hdf(frame_path)
else:
objective_pattern = [['cost', 'c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'],
['reduction', 'c350past_redv90b90e30m1m1m5_201710{0:02d}.gexf'],
['risk', 'c350past_riskv90b90e30m1m1m5_201710{0:02d}.gexf']]
problem = rows.load.load_problem('/home/pmateusz/dev/cordia/simulations/current_review_simulations/problems/C350_past.json')
history = load_history()
configurations = [(objective, instance, os.path.join(root_problem_dir, pattern.format(instance)))
for objective, pattern in objective_pattern
for instance in range(1, 15)]
solver_traces = {'cost': read_traces(os.path.join(root_problem_dir, 'c350past_distv90b90e30m1m1m5.err.log')),
'reduction': read_traces(os.path.join(root_problem_dir, 'c350past_redv90b90e30m1m1m5.err.log')),
'risk': read_traces(os.path.join(root_problem_dir, 'c350past_riskv90b90e30m1m1m5.err.log'))}
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
frames = []
for objective, instance, schedule_file in tqdm.tqdm(configurations):
schedule = rows.load.load_schedule(schedule_file)
solver_trace = solver_traces[objective][instance - 1]
schedule_cost = get_schedule_cost(schedule, solver_trace, problem, distance_estimator)
riskiness_evaluator = EssentialRiskinessEvaluator(settings, history, problem, schedule)
riskiness_evaluator.run()
frame = riskiness_evaluator.to_frame()
frame['objective'] = objective
frame['instance'] = instance
frame['travel_time'] = schedule_cost.travel_time
frame['carers_used'] = schedule_cost.carers_used
frame['visits_missed'] = schedule_cost.visits_missed
frames.append(frame)
frame = pandas.concat(frames)
frame.to_hdf(frame_path, key='a')
return frame
def get_max_average_delays(frame):
delays = []
visits = frame['visit'].unique()
for visit in visits:
delays.append(frame[frame['visit'] == visit]['delay'].mean().to_timedelta64())
delays.sort()
return delays
def compare_third_stage_table(args, settings):
frame = load_third_stage_records(settings)
instances = frame['instance'].unique()
instances.sort()
objectives = frame['objective'].unique()
objectives.sort()
data_set = []
for instance in instances:
for objective in objectives:
instance_frame = frame[(frame['instance'] == instance) & (frame['objective'] == objective)]
delays = get_max_average_delays(instance_frame)
max_delay = max(delays) / numpy.timedelta64(1, 'm')
travel_time = instance_frame['travel_time'].max()
carer_used = instance_frame['carers_used'].max()
visits_missed = instance_frame['visits_missed'].max()
data_set.append({'instance': instance,
'objective': objective,
'max_delay': max_delay,
'travel_time': travel_time / numpy.timedelta64(1, 'h'),
'carer_used': carer_used,
'visits_missed': visits_missed})
result_set = pandas.DataFrame(data=data_set)
del frame
records = []
for instance in instances:
record = collections.OrderedDict()
record['instance'] = instance
for metric in ['travel_time', 'carer_used', 'max_delay', 'visits_missed']:
for objective in objectives:
label = '{0}_{1}'.format(metric, objective)
record[label] = result_set[(result_set['instance'] == instance) & (result_set['objective'] == objective)][metric].min()
records.append(record)
data_set = pandas.DataFrame(data=records)
    print(tabulate.tabulate(pandas.DataFrame(data=data_set)))
import streamlit as st
import pandas as pd
import joblib
# In this section I read the static and real-time data from the citibike API
@st.cache
def load_stations():
    # This reads the citibike static station information: capacity, name, id, lat and lon of each station
df_station_information = pd.read_json('https://gbfs.citibikenyc.com/gbfs/en/station_information.json')
# read all the stations and store in data frame : stations
station_iter = len(df_station_information['data'][0])
station = []
for j in range(station_iter):
zipped = zip(['station_name', 'station_id', 'lat', 'lon', 'capacity'],
[df_station_information['data'][0][j]['name'],
df_station_information['data'][0][j]['station_id'],
df_station_information['data'][0][j]['lat'],
df_station_information['data'][0][j]['lon'],
df_station_information['data'][0][j]['capacity']
])
station.append(dict(zipped))
station = pd.DataFrame.from_dict(station)
return station
@st.cache
def load_distances():
df_dist = pd.read_csv('distances.csv')
return df_dist
@st.cache
def load_station_realtime():
df_st_status = pd.read_json('https://gbfs.citibikenyc.com/gbfs/en/station_status.json') # online # available
return df_st_status
@st.cache
def realtime_weather():
request = 'https://mesonet.agron.iastate.edu/cgi-bin/request/' \
'asos.py?station=NYC&data=tmpf&data=relh&data=feel&data=' \
'sped&data=p01i&data=vsby&year1=2020&month1=9&day1=28&year2=' \
'2020&month2=9&day2=28&tz=America%2FNew_York&format=onlycomma&latlon=' \
'no&missing=M&trace=T&direct=no&report_type=1&report_type=2'
df_weather = pd.read_csv(f'{request}', na_values=['M', 'T'])
df_weather['p01i'] = df_weather['p01i'].astype(float)
    df_weather = df_weather.fillna(method='ffill')
df_weather['datetime'] = pd.to_datetime(df_weather['valid'])
df_weather['Month'] = df_weather['datetime'].dt.month
df_weather['Day'] = df_weather['datetime'].dt.day
df_weather['Hour'] = df_weather['datetime'].dt.hour
df_weather['Weekday'] = ((df_weather['datetime'].dt.dayofweek) // 5 == 0).astype(float)
df_weather = df_weather.groupby(['Month', 'Day', 'Hour', 'Weekday']).mean().reset_index()
weather = df_weather.tail(1)
weather = weather.fillna(0)
return weather
def id_to_name(ids, station):
return station[station.station_id == ids].station_name.to_list()[0]
def find_realtime_status(all_stations, selected_station):
status = all_stations['data'][0]
status = next(item for item in status if item['station_id'] == selected_station)
return status
def nearby_stations(distances, all_stations, selected_station):
nearby_ids = distances.sort_values(by=selected_station)['station_id'].astype(str)[1:6].to_list()
nearby_dis = distances.sort_values(by=selected_station)[selected_station][1:6].to_list()
nearby_names = [id_to_name(ids, all_stations) for ids in nearby_ids]
return nearby_ids, nearby_names, nearby_dis
def make_prediction(weather, all_stations, selected_station):
station = all_stations.loc[all_stations['station_name'] == selected_station]
station = station.astype({'station_id': int})
result = pd.concat([weather.reset_index(), station.reset_index()], axis=1)
result = result.drop(['index', 'station_name', 'lat', 'lon', 'capacity'], axis=1)
result = result.fillna(0)
model_input = result.reindex(
['Month', 'Day', 'Hour', 'Weekday', 'station_id', 'tmpf', 'relh', 'feel', 'sped', 'p01i', 'vsby'],
axis=1).values
filename = 'rfc_2020.joblib'
model = joblib.load(filename)
model_output = model.predict(model_input)
return model_output
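# A station is classified by comparing the predicted net flow for the next hour
# with the units currently free: |flow| above 50% of the free bikes/docks marks
# the station as trending empty/full, and above 80% (or when almost nothing is
# free) it is flagged as running out / filling up fast.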
def station_reliability_start(free_units, flow):
if (flow < 0 and abs(flow) <= 0.5 * free_units):
reliability = 'good to pick up bikes'
elif (flow < 0 and abs(flow) > .5 * free_units and abs(flow) <= .8 * free_units):
reliability = 'running out of bikes'
elif (flow < 0 and abs(flow) > .8 * free_units) or (free_units < 2):
reliability = 'running out fast'
else:
reliability = 'good to pick up bikes'
return reliability
def station_reliability_stop(free_units, flow):
if (flow > 0 and abs(flow) <= .5 * free_units):
reliability = 'good to dock in'
elif (flow > 0 and abs(flow) > .5 * free_units and abs(flow) <= .8 * free_units):
reliability = 'docks filling up'
    elif (flow > 0 and abs(flow) > .8 * free_units) or (free_units < 1):
reliability = 'filling up fast'
else:
reliability = 'good to dock in'
return reliability
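# Minimal sanity-check sketch added for illustration (not called by the app);
# the numbers are made up: with 10 free bikes a predicted outflow of 4 keeps the
# station 'good to pick up bikes', an outflow of 9 exceeds 80% of the free bikes,
# and an inflow of 6 against 10 free docks means the docks are filling up.
def _reliability_example():
    assert station_reliability_start(10, -4) == 'good to pick up bikes'
    assert station_reliability_start(10, -9) == 'running out fast'
    assert station_reliability_stop(10, 6) == 'docks filling up'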
# streamlit web app title
st.write("""# Dock Right NY!""")
# load the stations names, capacity, lat and lon
stations = load_stations() # , df_station_status, df_dist
# streamlit web app: create a crop down menu with list of the stations to select
start_station_name = st.selectbox('Select Pick Up Station', stations['station_name'])
# The user selects station name now I need to obtain station_id
start_station = stations.loc[stations['station_name'] == start_station_name]
start_station_id = str(start_station['station_id'].to_list()[0])
# Now that I have tne station ID I can find the real-time number of available bikes and docks
df_station_status = load_station_realtime()
start_status = find_realtime_status(df_station_status, start_station_id)
'Pick up station has: ', start_status['num_bikes_available'], 'free bikes.'
# Now estimate flow and calculate reliability
current_weather = realtime_weather()
pred_sel_start = make_prediction(current_weather, stations, start_station_name)
start_reliability = station_reliability_start(start_status['num_bikes_available'], pred_sel_start)
'This station is:', start_reliability, 'within the next hour'
if start_reliability != 'good to pick up bikes':
# Now find available bikes and docks in 5 nearby stations
df_distances = load_distances()
start_near_ids, start_near_names, start_near_dist = nearby_stations(df_distances, stations, start_station_id)
start_near_status = [find_realtime_status(df_station_status, ind_st)['num_bikes_available'] for ind_st in
start_near_ids]
# Now estimate the inflow based on historic data and current weather
pred = [make_prediction(current_weather, stations, station_name) for station_name in start_near_names]
reliab = []
for i in range(len(pred)):
reliab.append(station_reliability_start(pred[i], start_near_status[i]))
show = pd.DataFrame()
show['station'] = start_near_names
show['Distance (m)'] = [round(x * 1000) for x in start_near_dist]
show['Realtime Free Bikes'] = start_near_status
show['Reliability'] = reliab#pred
st.write('Alternative Pick up Stations:', show)
# streamlit web app get the stop station from user
stop_station_name = st.selectbox('Select Drop Off Station', stations['station_name'])
# The user selects station name now I need to obtain station_id
stop_station = stations.loc[stations['station_name'] == stop_station_name]
stop_station_id = str(stop_station['station_id'].to_list()[0])
# Now that I have tne station ID I can find the real-time number of available bikes and docks
stop_status = find_realtime_status(df_station_status, stop_station_id)
'Drop off station has: ', stop_status['num_docks_available'], 'free docks.'
# Now estimate flow and calculate reliability
pred_sel_stop = make_prediction(current_weather, stations, stop_station_name)
stop_reliability = station_reliability_stop(stop_status['num_docks_available'], pred_sel_stop)
'This station is:', stop_reliability, 'within the next hour'
if stop_reliability != 'good to dock in':
df_distances = load_distances()
# Now find available bikes and docks in 5 nearby stations
stop_near_ids, stop_near_names, stop_near_dist = nearby_stations(df_distances, stations, stop_station_id)
stop_near_status = [find_realtime_status(df_station_status, ind_st)['num_docks_available'] for ind_st in stop_near_ids]
# Now estimate the inflow based on historic data and current weather
pred_stop = [make_prediction(current_weather, stations, station_name) for station_name in stop_near_names]
reliab_stop = []
for i in range(len(pred_stop)):
reliab_stop.append(station_reliability_stop(pred_stop[i], stop_near_status[i]))
show2 = pd.DataFrame()
show2['station'] = stop_near_names
show2['Distance (m)'] = [round(x * 1000) for x in stop_near_dist]
show2['Realtime Free Docks'] = stop_near_status
show2['Reliability'] = reliab_stop#pred_stop
st.write('Alternative Drop off Stations:', show2)
map_data = | pd.concat([start_station[['lat', 'lon']], stop_station[['lat', 'lon']]]) | pandas.concat |
"""Preparation of a demo dataset for the study with synthetic constraints
Script which saves one demo dataset in a format suitable for the synthetic-constraints pipeline.
Usage: python -m cffs.synthetic_constraints.prepare_demo_dataset --help
"""
import argparse
import pathlib
import pandas as pd
import sklearn.datasets
from cffs.utilities import data_utility
# Store a sklearn demo dataset as prediction-ready (X-y format) CSVs.
def prepare_demo_dataset(data_dir: pathlib.Path) -> None:
if not data_dir.is_dir():
print('Data directory does not exist. We create it.')
data_dir.mkdir(parents=True)
if len(data_utility.list_datasets(data_dir)) > 0:
print('Data directory already contains prediction-ready datasets. ' +
'Files might be overwritten, but not deleted.')
dataset = sklearn.datasets.load_boston()
features = dataset['feature_names']
X = | pd.DataFrame(data=dataset['data'], columns=features) | pandas.DataFrame |
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.base import BaseEstimator
from sklearn.neighbors import BallTree
class XGBSEBaseEstimator(BaseEstimator):
"""
Base class for all estimators in xgbse. Implements explainability through prototypes.
"""
def __init__(self):
self.persist_train = False
self.index_id = None
self.tree = None
self.bst = None
def get_neighbors(
self, query_data, index_data=None, query_id=None, index_id=None, n_neighbors=30
):
"""
        Search for prototypes (size: n_neighbors) for each unit in a
        dataframe X. If a units array is specified, comparables are returned
        using its identifiers; otherwise, a dataframe of comparable indexes
        for each sample in X is returned.
Args:
query_data (pd.DataFrame): Dataframe of features to be used as input
query_id ([pd.Series, np.array]): Series or array of identification for each sample of query_data.
Will be used in set_index if specified.
index_id ([pd.Series, np.array]): Series or array of identification for each sample of index_id.
If specified, comparables will be returned using this identifier.
n_neighbors (int): Number of neighbors/comparables to be considered.
Returns:
comps_df (pd.DataFrame): A dataframe of comparables/neighbors for each
evaluated sample. If units identifier is specified, the output dataframe
is converted to use units the proper identifier for each sample. The
reference sample is considered to be the index of the dataframe and
its comparables are its specific row values.
"""
if index_data is None and not self.persist_train:
raise ValueError("please specify the index_data")
if index_id is None and not self.persist_train:
index_id = index_data.index.copy()
if query_id is None:
query_id = query_data.index.copy()
if self.persist_train:
index_id = self.index_id
index = self.tree
else:
index_matrix = xgb.DMatrix(index_data)
index_leaves = self.bst.predict(
index_matrix,
pred_leaf=True,
iteration_range=(0, self.bst.best_iteration + 1),
)
if len(index_leaves.shape) == 1:
index_leaves = index_leaves.reshape(-1, 1)
index = BallTree(index_leaves, metric="hamming")
query_matrix = xgb.DMatrix(query_data)
query_leaves = self.bst.predict(
query_matrix,
pred_leaf=True,
iteration_range=(0, self.bst.best_iteration + 1),
)
if len(query_leaves.shape) == 1:
query_leaves = query_leaves.reshape(-1, 1)
compset = index.query(query_leaves, k=n_neighbors + 1, return_distance=False)
map_to_id = np.vectorize(lambda x: index_id[x])
comparables = map_to_id(compset)
comps_df = | pd.DataFrame(comparables[:, 1:]) | pandas.DataFrame |
import numpy as np
from scipy.interpolate.interpolate import interp1d
import matplotlib.pyplot as plt
import os
import fdasrsf as fs
import pyBASS as pb
import pandas as pd
####################################################################################################################################################################################
## flyer plates
def movingaverage(interval, window_size=3):
window = np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
## read in sims - may be faster ways, but this is general
def read_church_sims(path_sims):
files_unsorted = os.listdir(path_sims)
order = [int(str.split(ff, "_")[-1][0:4]) for ff in files_unsorted] # get first four characters after last underscore
files = [x for _, x in sorted(zip(order, files_unsorted))] # sorted list of files
nsims = len(files)
files_full = [path_sims + '/' + files[i] for i in range(len(files))]
dat_sims = [None] * nsims
for i in range(nsims):
with open(files_full[i]) as file:
temp = file.readlines()
dat_sims[i] = np.vstack([np.float_(str.split(temp[j], ',')) for j in range(2,len(temp))])
return dat_sims
## read in obs
def read_church_obs(path_obs):
with open(path_obs) as file:
temp = file.readlines()
char0 = [temp[i][0] for i in range(len(temp))] # get first character of each line
start_line = np.where([(char0[i] != '#') for i in range(len(temp))])[0][0] + 1 # find first line of data (last line that starts with # plus 2)
dat_obs = np.vstack([np.float_(str.split(temp[j], ',')) for j in range(start_line,len(temp))])
return dat_obs
## interpolate sims and obs to a grid, and possibly shift sims, smooth sims, scale obs
def snap2it(time_range, dat_sims, dat_obs, ntimes=200, move_start=False, start_cutoff=1e-4, start_exclude=0, smooth_avg_sims = 1, obs_scale=1.):
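    # Resample every simulation and the observation onto a common time grid;
    # optionally shift each simulation so its jump-off point (first smoothed
    # velocity above start_cutoff, ignoring the first start_exclude samples)
    # sits at t = 0, smooth the simulations and rescale the observation by
    # 1 / obs_scale.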
## new grid of times
#time_range = [0.0, 0.9]
#ntimes = 200
time_grid = np.linspace(time_range[0], time_range[1], ntimes)
nsims = len(dat_sims)
if move_start:
## get jump off time for each sim (first time with smoothed vel > start_cutoff, excluding first start_exclude points), smoothed to make consistent with smoothing later
#start_exclude = 200
#start_cutoff = 1e-4
sim_start_times = [0] * nsims
for i in range(nsims):
xx = movingaverage(dat_sims[i][:, 1], smooth_avg_sims)
idx = np.where(xx[start_exclude:] > start_cutoff)[0][0] + start_exclude
sim_start_times[i] = dat_sims[i][idx, 0]
## interpolate sims to be on new grid of times, and smooth
sims_grid = np.empty([nsims, ntimes])
for i in range(nsims):
ifunc = interp1d(dat_sims[i][:,0] - sim_start_times[i], movingaverage(dat_sims[i][:,1], smooth_avg_sims), kind = 'cubic')
sims_grid[i, :] = ifunc(time_grid)
## interpolate obs to be on new grid of times, transform units, and possibly smooth
ifunc = interp1d(dat_obs[:,0], movingaverage(dat_obs[:,1]/obs_scale, 1), kind = 'cubic', bounds_error=False, fill_value=0.)
obs_grid = ifunc(time_grid)
return {"sims": sims_grid, "obs": obs_grid, "time": time_grid}
## do warping
def warp(grid_dat, lam=0.01):
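    # Elastic (SRSF) alignment with fdasrsf: all simulations and the
    # observation are aligned to the first simulation; returns the aligned
    # curves (ftilde), the warping functions (gam) and their shooting-vector
    # representation (vv).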
out = fs.fdawarp(grid_dat['sims'].T, grid_dat['time'])
out.multiple_align_functions(grid_dat['sims'][0], parallel=True, lam=lam)
#out.multiple_align_functions(obs_all_list[j], parallel=True, lam=.001)
gam_sim = out.gam
vv_sim = fs.geometry.gam_to_v(out.gam)
ftilde_sim = out.fn
out2 = fs.pairwise_align_functions(grid_dat['sims'][0], grid_dat['obs'], grid_dat['time'], lam=lam)
#out2 = fs.pairwise_align_functions(obs_all_list[j], obs_all_list[j], xx_all_list[j], lam=.01)
gam_obs = out2[1]
vv_obs = fs.geometry.gam_to_v(out2[1])
ftilde_obs = out2[0]
return {'gam_sim':gam_sim, 'vv_sim':vv_sim, 'ftilde_sim':ftilde_sim, 'gam_obs':gam_obs, 'vv_obs':vv_obs, 'ftilde_obs':ftilde_obs}
path_sims = os.path.abspath('../../../Desktop/impala_data/Ti64.Flyer.NewAlpha/Ti64_Church2002_Manganin_591_612/Results/')
path_obs = os.path.abspath('../../../git/impala/data/ti-6al-4v/Data/FlyerPlate/Church2002/Church2002_Fig2-300k-591m_s-12mm.csv')
dat_sims = read_church_sims(path_sims)
dat_obs = read_church_obs(path_obs)
## plot, shows time discrepancy, velocity unit differences
for i in range(len(dat_sims)):
plt.plot(dat_sims[i][:,0],dat_sims[i][:,1])
plt.plot(dat_obs[:,0], dat_obs[:,1]/100)
plt.show()
# put on same grid
dat_grid = snap2it([0.0, 0.4], dat_sims, dat_obs, move_start=True, start_cutoff=1e-4, start_exclude=200, smooth_avg_sims = 3, obs_scale=100)
plt.plot(dat_grid['time'], dat_grid['sims'].T)
plt.plot(dat_grid['time'], dat_grid['obs'], color='black')
plt.show()
# warp
dat_warp = warp(dat_grid, lam=.001)
plt.plot(dat_warp['ftilde_sim'],'black')
plt.plot(dat_warp['ftilde_sim'][:,0],'blue')
plt.plot(dat_warp['ftilde_obs'],'red')
plt.show()
plt.plot(dat_warp['gam_sim'])
plt.plot(dat_warp['gam_obs'],color='black')
plt.show()
plt.plot(dat_warp['vv_sim'])
plt.plot(dat_warp['vv_obs'],color='black')
plt.show()
def emu(inputs, dat_warp, ntest=20):
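    # Hold out ntest randomly chosen runs, then fit two BASS PCA emulators on
    # the remaining inputs: one for the aligned curves (ftilde) and one for
    # the warping shooting vectors (vv); train/test residuals are returned
    # for diagnostics.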
ho = np.random.choice(dat_warp['ftilde_sim'].shape[1], ntest, replace=False)
Xtrain = np.delete(inputs, ho, axis=0)
Xtest = inputs[ho]
ftilde_train = np.delete(dat_warp['ftilde_sim'], ho, axis=1)
ftilde_test = dat_warp['ftilde_sim'][:,ho]
vv_train = np.delete(dat_warp['vv_sim'], ho, axis=1)
vv_test = dat_warp['vv_sim'][:,ho]
gam_train = np.delete(dat_warp['gam_sim'], ho, axis=1)
gam_test = dat_warp['gam_sim'][:,ho]
emu_ftilde = pb.bassPCA(Xtrain, ftilde_train.T, ncores=15, npc=15, nmcmc=50000, nburn=40000, thin=10)
emu_vv = pb.bassPCA(Xtrain, vv_train.T, ncores=15, npc=9, nmcmc=50000, nburn=40000, thin=10)
# emu_ftilde.plot()
# emu_vv.plot()
pred_ftilde = emu_ftilde.predict(Xtrain)
predtest_ftilde = emu_ftilde.predict(Xtest)
pred_vv = emu_vv.predict(Xtrain)
pred_gam = fs.geometry.v_to_gam(pred_vv.mean(0).T)
predtest_vv = emu_vv.predict(Xtest)
predtest_gam = fs.geometry.v_to_gam(predtest_vv.mean(0).T)
ftilde_resids = pred_ftilde.mean(0).T - ftilde_train
ftilde_resids_test = predtest_ftilde.mean(0).T - ftilde_test
gam_resids = pred_gam.mean(0).T - gam_train
gam_resids_test = predtest_gam.mean(0).T - gam_test
vv_resids = pred_vv.mean(0).T - vv_train
vv_resids_test = predtest_vv.mean(0).T - vv_test
return {'emu_ftilde':emu_ftilde, 'emu_vv':emu_vv, 'pred_ftilde':pred_ftilde,
'predtest_ftilde':predtest_ftilde, 'pred_vv':pred_vv, 'pred_gam':pred_gam,
'predtest_vv':predtest_vv, 'predtest_gam':predtest_gam, 'ftilde_resids':ftilde_resids,
'ftilde_resids_test':ftilde_resids_test, 'gam_resids':gam_resids, 'gam_resids_test':gam_resids_test,
'vv_resids':vv_resids, 'vv_resids_test':vv_resids_test}
sim_inputs = np.genfromtxt('../../../Desktop/impala_data/Ti64.Flyer.NewAlpha/Ti64.design.ptw.1000.txt', skip_header=1)
in2 = pb.normalize(sim_inputs, np.array([np.min(sim_inputs,0),np.max(sim_inputs,0)]).T)
emus = emu(in2, dat_warp, 100)
emus['emu_ftilde'].plot()
emus['emu_vv'].plot()
plt.plot(emus['ftilde_resids_test'],'r.')
plt.plot(emus['ftilde_resids'],color='black')
plt.show()
plt.plot(emus['vv_resids_test'],'r.')
plt.plot(emus['vv_resids'],'black')
plt.show()
ntest = 200
inputs = in2
ho = np.random.choice(dat_warp['ftilde_sim'].shape[1], ntest, replace=False)
Xtrain = np.delete(inputs, ho, axis=0)
Xtest = inputs[ho]
ftilde_train = np.delete(dat_warp['ftilde_sim'], ho, axis=1)
ftilde_test = dat_warp['ftilde_sim'][:,ho]
vv_train = np.delete(dat_warp['vv_sim'], ho, axis=1)
vv_test = dat_warp['vv_sim'][:,ho]
gam_train = np.delete(dat_warp['gam_sim'], ho, axis=1)
gam_test = dat_warp['gam_sim'][:,ho]
emu_ftilde = pb.bassPCA(in2, dat_warp['ftilde_sim'].T, ncores=15, npc=15, nmcmc=50000, nburn=40000, thin=10)
emu_ftilde.bm_list[0].plot()
yy = emu_ftilde.newy[0]
bmod = pb.bass(Xtrain,np.delete(yy, ho), nmcmc=50000, nburn=40000, thin=10)
bmod.plot()
plt.scatter(bmod.predict(Xtrain).mean(0), np.delete(yy, ho))
plt.show()
plt.scatter(bmod.predict(Xtest).mean(0), yy[ho])
plt.show()
bad = np.hstack((np.where(bmod.predict(Xtest).mean(0)>0.1)[0], np.where(bmod.predict(Xtest).mean(0)< -.07)[0]))
plt.plot(ftilde_test,'black')
plt.plot(ftilde_test[:,bad],'red')
plt.show()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# this produces obs, sims, warped obs, warped sims, warping function obs, warping function sims, aligned emu, vv emu
# model will need obs and emus...do I need to save the rest of this? If not, lets just dill the emus and obs (pretty sure you cant put them in sql table)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
########################################################################################################################
## Taylor cylinder data
def read_sims(path_sims):
files_unsorted = os.listdir(path_sims)
order = [int(str.split(ff, "_")[-1][0:4]) for ff in files_unsorted] # get first four characters after last underscore
files = [x for _, x in sorted(zip(order, files_unsorted))] # sorted list of files
nsims = len(files)
files_full = [path_sims + '/' + files[i] for i in range(len(files))]
dat_sims = [None] * nsims
for i in range(nsims):
with open(files_full[i]) as file:
temp = file.readlines()
dat_sims[i] = np.vstack([np.float_(str.split(temp[j], ',')) for j in range(2,len(temp))])
return dat_sims
## read in obs
def read_obs(path_obs):
with open(path_obs) as file:
temp = file.readlines()
char0 = [temp[i][0] for i in range(len(temp))] # get first character of each line
start_line = np.where([(char0[i] != '#') for i in range(len(temp))])[0][0] + 1 # find first line of data (last line that starts with # plus 2)
dat_obs = np.vstack([np.float_(str.split(temp[j], ',')) for j in range(start_line,len(temp))])
return dat_obs
def snap2it(dat_sims, dat_obs, nt=200, mm=0):
#time_range = [0.0, 0.9]
#ntimes = 200
t_grid = np.linspace(0, 1, nt)
nsims = len(dat_sims)
height = [dat_sims[i][:, 1].max() for i in range(nsims)]
## interpolate sims to be on new grid of times, and smooth
sims_grid = np.empty([nsims, nt])
for i in range(nsims):
idx = np.where(dat_sims[i][:,0] > mm)[0]
t_grid = np.linspace(dat_sims[i][idx,1][0], dat_sims[i][idx,1][-1], nt)
ifunc = interp1d(dat_sims[i][idx,1], dat_sims[i][idx,0], kind = 'linear')
sims_grid[i, :] = ifunc(t_grid)
## interpolate obs to be on new grid of times, transform units, and possibly smooth
ifunc = interp1d(dat_obs[:,0], dat_obs[:,1], kind = 'linear', bounds_error=False, fill_value=0.)
t_grid = np.linspace(dat_obs[0,0], dat_obs[-1,0], nt)
obs_grid = ifunc(t_grid)
obs_height = dat_obs[:,0].max()
return {"sims": sims_grid, "obs": obs_grid, 'length_sims': height, 'length_obs': obs_height}
def warp(grid_dat, t_grid = np.linspace(0, 1, 400), lam=0.01):
out = fs.fdawarp(grid_dat['sims'].T, t_grid)
out.multiple_align_functions(grid_dat['sims'][0], parallel=True, lam=lam)
#out.multiple_align_functions(obs_all_list[j], parallel=True, lam=.001)
gam_sim = out.gam
vv_sim = fs.geometry.gam_to_v(out.gam)
ftilde_sim = out.fn
out2 = fs.pairwise_align_functions(grid_dat['sims'][0], grid_dat['obs'], t_grid, lam=lam)
#out2 = fs.pairwise_align_functions(obs_all_list[j], obs_all_list[j], xx_all_list[j], lam=.01)
gam_obs = out2[1]
vv_obs = fs.geometry.gam_to_v(out2[1])
ftilde_obs = out2[0]
return {'gam_sim':gam_sim, 'vv_sim':vv_sim, 'ftilde_sim':ftilde_sim, 'gam_obs':gam_obs, 'vv_obs':vv_obs, 'ftilde_obs':ftilde_obs}
sim_inputs = np.genfromtxt('../../../Desktop/impala_data/Ti64.TaylorCyl.NewAlpha/Ti64.design.ptw.1000.txt', skip_header=1)
path_sims = os.path.abspath('../../../Desktop/impala_data/Ti64.TaylorCyl.NewAlpha/taylor_mcd2007_fig4/Results/')
path_obs = os.path.abspath('../../../git/impala/data/ti-6al-4v/Data/TaylorCyl/McDonald2007/McDonald2007_Fig4_Taylor.csv')
path_sims = os.path.abspath('../../../Desktop/impala_data/Ti64.TaylorCyl.NewAlpha/taylor_mcd2007_fig5/Results/')
path_obs = os.path.abspath('../../../git/impala/data/ti-6al-4v/Data/TaylorCyl/McDonald2007/McDonald2007_Fig5_Taylor.csv')
dat_sims = read_sims(path_sims)
dat_obs = read_obs(path_obs)/10
for i in range(len(dat_sims)):
plt.plot(dat_sims[i][:,0], dat_sims[i][:,1],'grey')
plt.plot(dat_obs[:,1], dat_obs[:,0],'black')
plt.show()
dat_snap = snap2it(dat_sims, dat_obs, 400, mm = dat_obs[-1,1])
plt.plot(dat_snap['sims'].T)
plt.show()
#dat_warp = warp(dat_prep)
#plt.plot(dat_warp['ftilde_sim'])
#plt.show()
files_unsorted = os.listdir(path_sims)
run = [int(str.split(ff, "_")[-1][0:4]) for ff in files_unsorted]
inputs = sim_inputs[np.sort(run)-1,:] # exclude failed runs
ntest = 20
ho = np.random.choice(len(run), ntest, replace=False)
Xtrain = np.delete(inputs, ho, axis=0)
Xtest = inputs[ho]
mod_length = pb.bass(Xtrain, np.delete(dat_snap['length_sims'], ho))
mod_length.plot()
plt.scatter(mod_length.predict(Xtest).mean(0), np.array(dat_snap['length_sims'])[ho])
pb.abline(1,0)
plt.show()
mod_profile = pb.bassPCA(Xtrain, np.delete(dat_snap['sims'], ho, axis=0), npc=15, ncores=10)
mod_profile.plot()
pred_train = mod_profile.predict(Xtrain)
pred_test = mod_profile.predict(Xtest)
plt.plot(pred_train.mean(0).T - np.delete(dat_snap['sims'], ho, axis=0).T,'black')
plt.plot(pred_test.mean(0).T - dat_snap['sims'][ho].T,'red')
plt.show()
plt.scatter(mod_profile.predict(Xtest).mean(0), np.array(dat_snap['sims'])[ho])
pb.abline(1,0)
plt.show()
##########################
path_sims = os.path.abspath('../../../Desktop/impala_data/Ti64.TaylorCyl.NewAlpha/taylor_yu2011_T4/Results/')
dat_sims = read_sims(path_sims)
for i in range(len(dat_sims)):
plt.plot(dat_sims[i][:,0],dat_sims[i][:,1])
plt.show()
length_sims = np.array([max(dat_sims[i][:,1]) for i in range(len(dat_sims))])
files_unsorted = os.listdir(path_sims)
run = [int(str.split(ff, "_")[-1][0:4]) for ff in files_unsorted]
inputs = sim_inputs[np.sort(run)-1,:] # exclude failed runs
ntest = 20
ho = np.random.choice(len(run), ntest, replace=False)
Xtrain = np.delete(inputs, ho, axis=0)
Xtest = inputs[ho]
mod_length = pb.bass(Xtrain, np.delete(length_sims, ho))
mod_length.plot()
plt.scatter(mod_length.predict(Xtest).mean(0), length_sims[ho])
pb.abline(1,0)
plt.show()
# Test | Deformed length (y-coordinate, cm)
# Yu2011_T1 | 2.4493
# Yu2011_T2 | 2.4095
# Yu2011_T3 | 2.3901
# Yu2011_T4 | 2.3702
##############################################################################################################################
def add_tc(path_sims, path_obs, xps):
dat_sims = read_sims(path_sims)
dat_obs = read_obs(path_obs)/10
dat_snap = snap2it(dat_sims, dat_obs, 400, mm = dat_obs[-1,1])
files_unsorted = os.listdir(path_sims)
run = [int(str.split(ff, "_")[-1][0:4]) for ff in files_unsorted]
inputs = pd.DataFrame(sim_inputs[np.sort(run)-1,:]) # exclude failed runs
xps.append({'obs': pd.DataFrame(np.array(dat_snap['length_obs']).reshape([1,1])),
'sim_inputs': inputs,
'sim_outputs': pd.DataFrame(dat_snap['length_sims']),
'fname': path_sims})
xps.append({'obs': pd.DataFrame(dat_snap['obs']),
'sim_inputs': inputs,
'sim_outputs': pd.DataFrame(dat_snap['sims']),
'fname': path_sims})
#return xps
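# Usage sketch (illustrative, not part of the original analysis): add_tc mutates the list
# passed in, appending one deformed-length experiment and one profile experiment per
# Taylor-cylinder case; pair each simulation directory with its matching observation file.
# Note that this re-reads the simulation results from disk.
xps_demo = []
add_tc(os.path.abspath('../../../Desktop/impala_data/Ti64.TaylorCyl.NewAlpha/taylor_mcd2007_fig4/Results/'),
       os.path.abspath('../../../git/impala/data/ti-6al-4v/Data/TaylorCyl/McDonald2007/McDonald2007_Fig4_Taylor.csv'),
       xps_demo)
print('experiments collected:', len(xps_demo))  # 2 per call: length + profile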
def add_tc_length(path_sims, obs, xps):
dat_sims = read_sims(path_sims)
length_sims = np.array([max(dat_sims[i][:,1]) for i in range(len(dat_sims))])
files_unsorted = os.listdir(path_sims)
run = [int(str.split(ff, "_")[-1][0:4]) for ff in files_unsorted]
inputs = sim_inputs[np.sort(run)-1,:] # exclude failed runs
xps.append({'obs': pd.DataFrame(np.array(obs).reshape([1,1])),
'sim_inputs': | pd.DataFrame(inputs) | pandas.DataFrame |
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(
pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start="4/2/2012", periods=10, freq="B")
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(
year=years, month=months, freq="M", start=Period("2007-01", freq="M")
)
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range("2007-1-1", periods=500, freq="X")
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N"
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="N"),
Period(ordinal=2, freq="N"),
Period(ordinal=3, freq="N"),
Period(ordinal=4, freq="N"),
],
freq="N",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC")
def test_constructor_corner(self):
msg = "Not enough parameters to construct Period range"
with pytest.raises(ValueError, match=msg):
PeriodIndex(periods=10, freq="A")
start = Period("2007", freq="A-JUN")
end = Period("2010", freq="A-DEC")
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end)
msg = (
"Of the three parameters: start, end, and periods, exactly two"
" must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
with pytest.raises(ValueError, match=msg):
PeriodIndex(end=end)
result = period_range("2007-01", periods=10.5, freq="M")
exp = period_range("2007-01", periods=10, freq="M")
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = | period_range("2007-01", periods=20, freq="M") | pandas.period_range |
import tkinter as tk
import pandas as pd
import numpy as np
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
import csv
import ipython_genutils as ip
import matplotlib as mpl
from matplotlib import pyplot as plt
import seaborn as sns
import warnings
from collections import Counter
import datetime
import wordcloud
import program as p1
import program2 as p2
PLOT_COLORS = ["#268bd2", "#0052CC", "#FF5722", "#b58900", "#003f5c"]
pd.options.display.float_format = '{:.2f}'.format
sns.set(style="ticks")
plt.rc('figure', figsize=(8, 5), dpi=100)
plt.rc('axes', labelpad=20, facecolor="#ffffff", linewidth=0.4, grid=True, labelsize=14)
plt.rc('patch', linewidth=0)
plt.rc('xtick.major', width=0.2)
plt.rc('ytick.major', width=0.2)
plt.rc('grid', color='#9E9E9E', linewidth=0.4)
plt.rc('font', family='Arial', weight='400', size=10)
plt.rc('text', color='#282828')
plt.rc('savefig', pad_inches=0.3, dpi=300)
#process Decision tree
df = pd.read_csv(r"./TrendingJoTrending.csv", header=None)
df[0] = pd.to_numeric(df[0], errors='coerce')
df = df.replace(np.nan, 0, regex=True)
df[1] = pd.to_numeric(df[1], errors='coerce')
df = df.replace(np.nan, 1, regex=True)
df[2] = | pd.to_numeric(df[2], errors='coerce') | pandas.to_numeric |
from svd.path_config import build_localized_explanations_path, build_figure_perturbation_path
from svd.utils.io import pickle_load
from svd.explanations.explanation_utils import THRESHOLD
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style="whitegrid")
import pandas as pd
import numpy as np
from argparse import ArgumentParser
font = {'size': 22}
plt.rc('font', **font)
if __name__ == '__main__':
# This script is used to create Figure 3 of the paper.
parser = ArgumentParser()
parser.add_argument("--attack_type", required=True, choices=["wave", "spec"])
parser.add_argument("--subset", required=True, choices=["valid", "test"])
parser.add_argument("--baseline", default="min", choices=["min", "zero", "mean"])
parser.add_argument("--target", required=True, type=int, choices=[0, 1])
parser.add_argument("--cumulative", action='store_true', default=False)
args = parser.parse_args()
first_layer = "0mean"
attack_type = args.attack_type
subset = args.subset
baseline = args.baseline
target = args.target
cumulative = args.cumulative
model_name = 'model_log_{}'.format(first_layer)
predictions = {
1: [], 3: [], 5: []
}
components_collected_k = {}
for k in [1, 3, 5]:
storage_path = build_localized_explanations_path(model_name, baseline, attack_type, target, subset, k)
components_collected_k[k] = pickle_load(storage_path)
predictions[k] = pickle_load(storage_path.replace("components_localized_", "predictions_localized_"))
print(components_collected_k[5][0])
print(components_collected_k[3][0])
components_aggregated = {
1: [], 3: [], 5: []
}
for k in [1, 3, 5]:
components_current_k = components_collected_k[k]
for comp_row in components_current_k:
explanation_indeces = comp_row[1]
if target == 0:
explanation_indeces = explanation_indeces[::-1]
explained_weights = [w[0] for w in explanation_indeces[:k]]
components_aggregated[k].append(len(set(comp_row[0]).intersection(explained_weights)))
df_ = pd.concat([pd.DataFrame({"components": components_aggregated[1], "predictions": predictions[1], "k": 1}),
| pd.DataFrame({"components": components_aggregated[3], "predictions": predictions[3], "k": 3}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Diff DFs
# Compute various diffs between two Pandas DataFrames
#
# See [examples](#examples) / [usage](#diff) below.
# In[1]:
from IPython.display import HTML
from numpy import nan
from pandas import concat, Index, IndexSlice as idx, isna, MultiIndex
from re import sub
def neq(l, r): return l!=r and not (isna(l) and | isna(r) | pandas.isna |
'''
Feature Engineering and model training
'''
import pickle
import pandas as pd
import numpy as np
from sklearn.decomposition import NMF
from sklearn.impute import KNNImputer
links = pd.read_csv('links.csv')  # read_csv already returns a DataFrame
movies_ = pd.read_csv('movies.csv')
ratings = pd.read_csv('ratings.csv')
tags = pd.read_csv('tags.csv')
# Take columns from movies into links (assumes links.csv and movies.csv are row-aligned by movieId)
links['title'] = movies_['title']
links['genres'] = movies_['genres']
# Set Indexes
links.set_index('movieId', inplace=True)
ratings.set_index(['movieId', 'userId'], inplace=True)
tags.set_index(['movieId', 'userId'], inplace=True)
# Merge links, ratings and tags into "df"
links_ratings = pd.merge(left=links, right=ratings, left_index=True, right_index=True)
df = | pd.merge(left=links_ratings, right=tags, how='left', left_on=['movieId', 'userId'], right_on=['movieId', 'userId']) | pandas.merge |
from sklearn.datasets import load_breast_cancer, fetch_california_housing
import pandas as pd
import numpy as np
import pickle
import os
import collections
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
def handle_categorical_feat(X_df):
''' It moves the categorical features to the last '''
original_columns = []
one_hot_columns = []
for col_name, dtype in zip(X_df.dtypes.index, X_df.dtypes):
if dtype == object:
one_hot_columns.append(col_name)
else:
original_columns.append(col_name)
X_df = X_df[original_columns + one_hot_columns]
return X_df, one_hot_columns
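# Minimal sketch (illustrative, not part of the original module) of what
# handle_categorical_feat does: numeric columns stay first, object-dtype columns move to
# the end, and the categorical column names are returned alongside the reordered frame.
# _demo = pd.DataFrame({'age': [30, 40], 'job': ['a', 'b'], 'wage': [1.0, 2.0]})
# _demo, _cats = handle_categorical_feat(_demo)
# _demo.columns.tolist() == ['age', 'wage', 'job']; _cats == ['job']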
def load_breast_data():
breast = load_breast_cancer()
feature_names = list(breast.feature_names)
X, y = pd.DataFrame(breast.data, columns=feature_names), pd.Series(breast.target)
dataset = {
'problem': 'classification',
'full': {
'X': X,
'y': y,
},
'd_name': 'breast',
'search_lam': np.logspace(-1, 2.5, 15),
}
return dataset
def load_adult_data():
# https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data
df = pd.read_csv("./datasets/adult.data", header=None)
df.columns = [
"Age", "WorkClass", "fnlwgt", "Education", "EducationNum",
"MaritalStatus", "Occupation", "Relationship", "Race", "Gender",
"CapitalGain", "CapitalLoss", "HoursPerWeek", "NativeCountry", "Income"
]
train_cols = df.columns[0:-1]
label = df.columns[-1]
X_df = df[train_cols].copy()
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
y_df = df[label].copy()
# Make it as 0 or 1
y_df.loc[y_df == ' >50K'] = 1.
y_df.loc[y_df == ' <=50K'] = 0.
y_df = y_df.astype(int)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'adult',
'search_lam': np.logspace(-2, 2, 15),
'n_splines': 50,
'onehot_columns': onehot_columns,
}
return dataset
def load_credit_data():
# https://www.kaggle.com/mlg-ulb/creditcardfraud
df = pd.read_csv(r'./datasets/creditcard.csv')
train_cols = df.columns[0:-1]
label = df.columns[-1]
X_df = df[train_cols]
y_df = df[label]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'credit',
'search_lam': np.logspace(-0.5, 2.5, 8),
}
return dataset
def load_churn_data():
# https://www.kaggle.com/blastchar/telco-customer-churn/downloads/WA_Fn-UseC_-Telco-Customer-Churn.csv/1
df = pd.read_csv(r'./datasets/WA_Fn-UseC_-Telco-Customer-Churn.csv')
train_cols = df.columns[1:-1] # First column is an ID
label = df.columns[-1]
X_df = df[train_cols].copy()
# Handle special case of TotalCharges wronly assinged as object
    X_df.loc[X_df['TotalCharges'] == ' ', 'TotalCharges'] = 0.
X_df.loc[:, 'TotalCharges'] = pd.to_numeric(X_df['TotalCharges'])
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
y_df = df[label].copy() # 'Yes, No'
# Make it as 0 or 1
y_df[y_df == 'Yes'] = 1.
y_df[y_df == 'No'] = 0.
y_df = y_df.astype(int)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'churn',
'search_lam': np.logspace(0, 3, 15),
'onehot_columns': onehot_columns,
}
return dataset
def load_pneumonia_data(folder='/media/intdisk/medical/RaniHasPneumonia/'):
featurename_file = os.path.join(folder, 'featureNames.txt')
col_names = pd.read_csv(featurename_file, delimiter='\t', header=None, index_col=0).iloc[:, 0].values
def read_data(file_path='pneumonia/RaniHasPneumonia/medis9847c.data'):
df = pd.read_csv(file_path, delimiter='\t', header=None)
df = df.iloc[:, :-1] # Remove the last empty wierd column
df.columns = col_names
return df
df_train = read_data(os.path.join(folder, 'medis9847c.data'))
df_test = read_data(os.path.join(folder, 'medis9847c.test'))
df = pd.concat([df_train, df_test], axis=0).reset_index(drop=True)
X_df = df.iloc[:, :-1]
y_df = df.iloc[:, -1]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'test_size': 4352 / 14199,
'd_name': 'pneumonia',
'search_lam': np.logspace(0, 3, 15),
}
return dataset
def load_heart_data():
# https://www.kaggle.com/sonumj/heart-disease-dataset-from-uci
df = pd.read_csv('./datasets/HeartDisease.csv')
label = df.columns[-2]
train_cols = list(df.columns[1:-2]) + [df.columns[-1]]
X_df = df[train_cols]
y_df = df[label]
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
# Impute the missingness as 0
X_df = X_df.apply(lambda col: col if col.dtype == object else col.fillna(0.), axis=0)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'heart',
'search_lam': np.logspace(0, 3, 15),
'onehot_columns': onehot_columns,
}
return dataset
def load_mimiciii_data():
df_adult = pd.read_csv('./datasets/adult_icu.gz', compression='gzip')
train_cols = [
'age', 'first_hosp_stay', 'first_icu_stay', 'adult_icu', 'eth_asian',
'eth_black', 'eth_hispanic', 'eth_other', 'eth_white',
'admType_ELECTIVE', 'admType_EMERGENCY', 'admType_NEWBORN',
'admType_URGENT', 'heartrate_min', 'heartrate_max', 'heartrate_mean',
'sysbp_min', 'sysbp_max', 'sysbp_mean', 'diasbp_min', 'diasbp_max',
'diasbp_mean', 'meanbp_min', 'meanbp_max', 'meanbp_mean',
'resprate_min', 'resprate_max', 'resprate_mean', 'tempc_min',
'tempc_max', 'tempc_mean', 'spo2_min', 'spo2_max', 'spo2_mean',
'glucose_min', 'glucose_max', 'glucose_mean', 'aniongap', 'albumin',
'bicarbonate', 'bilirubin', 'creatinine', 'chloride', 'glucose',
'hematocrit', 'hemoglobin', 'lactate', 'magnesium', 'phosphate',
'platelet', 'potassium', 'ptt', 'inr', 'pt', 'sodium', 'bun', 'wbc']
label = 'mort_icu'
X_df = df_adult[train_cols]
y_df = df_adult[label]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'mimiciii',
'search_lam': np.logspace(0, 3, 15),
}
return dataset
def load_mimicii_data():
cols = ['Age', 'GCS', 'SBP', 'HR', 'Temperature',
'PFratio', 'Renal', 'Urea', 'WBC', 'CO2', 'Na', 'K',
'Bilirubin', 'AdmissionType', 'AIDS',
'MetastaticCancer', 'Lymphoma', 'HospitalMortality']
table = pd.read_csv('./datasets/mimic2.data', delimiter=' ', header=None)
table.columns = cols
X_df = table.iloc[:, :-1]
y_df = table.iloc[:, -1]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'mimicii',
'search_lam': np.logspace(-2, 3.5, 15),
}
return dataset
def load_diabetes2_data(load_cache=False):
cache_dataset_path = './datasets/diabetes_cache.pkl'
if load_cache and os.path.exists(cache_dataset_path):
print('Find the diabetes dataset. Load from cache.')
with open(cache_dataset_path, 'rb') as fp:
dataset = pickle.load(fp)
return dataset
df = pd.read_csv('./datasets/dataset_diabetes/diabetic_data.csv')
x_cols = df.columns[2:-1]
y_col = df.columns[-1]
X_df = df[x_cols].copy()
y_df = df[y_col].copy()
y_df.loc[(y_df == 'NO') | (y_df == '>30')] = 0
y_df.loc[y_df == '<30'] = 1
# is_false = (y_df == 'NO')
# y_df.loc[is_false] = 0
# y_df.loc[~is_false] = 1
y_df = y_df.astype(int)
# Preprocess X
X_df.loc[:, 'age'] = X_df.age.apply(lambda s: (int(s[1:s.index('-')]) + int(s[(s.index('-') + 1):-1])) / 2).astype(int)
X_df.loc[:, 'weight'] = X_df.weight.apply(lambda s: 0. if s == '?' else ((float(s[1:s.index('-')]) + float(s[(s.index('-') + 1):-1])) / 2 if '-' in s else float(s[1:])))
X_df.loc[:, 'admission_source_id'] = X_df.admission_source_id.astype('object')
X_df.loc[:, 'admission_type_id'] = X_df.admission_type_id.astype('object')
X_df.loc[:, 'discharge_disposition_id'] = X_df.discharge_disposition_id.astype('object')
X_df.loc[:, 'change'] = X_df.change.apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'diabetesMed'] = X_df.diabetesMed.apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'metformin-pioglitazone'] = X_df['metformin-pioglitazone'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'metformin-rosiglitazone'] = X_df['metformin-rosiglitazone'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'glipizide-metformin'] = X_df['glipizide-metformin'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'troglitazone'] = X_df['troglitazone'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'tolbutamide'] = X_df['tolbutamide'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'acetohexamide'] = X_df['acetohexamide'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df = X_df.drop(['citoglipton', 'examide'], axis=1) # Only have NO in the data
# diag_combined = X_df.apply(lambda x: set(
# [x.diag_1 for i in range(1) if x.diag_1 != '?'] + [x.diag_2 for i in range(1) if x.diag_2 != '?'] + [x.diag_3 for i in range(1) if x.diag_3 != '?']
# ), axis=1)
# diag_combined = diag_combined.apply(collections.Counter)
# diag_multihot_encode = pd.DataFrame.from_records(diag_combined).fillna(value=0).astype(np.uint8)
# diag_multihot_encode.columns = ['diag_%s' % str(c) for c in diag_multihot_encode.columns]
X_df = X_df.drop(['diag_1', 'diag_2', 'diag_3'], axis=1)
# X_df = pd.concat([X_df, diag_multihot_encode], axis=1)
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'diabetes2',
'search_lam': np.logspace(-3, 2, 8),
'n_splines': 50,
'onehot_columns': onehot_columns,
}
with open(cache_dataset_path, 'wb') as op:
pickle.dump(dataset, op)
return dataset
def load_TCGA_data(test_split=0.33, n_splits=20, cosmic=True, random_state=1377, **kwargs):
np.random.seed(random_state)
filename = 'pancancer_cosmic.npz' if cosmic else 'pancancer_parsed.npz'
x = np.load('datasets/TCGA/%s' % filename)['arr_0']
# log transform
x_df = pd.DataFrame(np.log10(x + 1))
# append the column name
transcript_names_path = 'transcript_names_cosmic' if cosmic else 'transcript_names'
x_df.columns = np.load('datasets/TCGA/%s.npy' % transcript_names_path)
# remove the columns with std as 0
x_df = x_df.loc[:, (x.std(axis=0) > 0.)]
covars = pd.read_csv('datasets/TCGA/potential_covariates.tsv', delimiter='\t')
covars['label'] = np.logical_or(covars[['sample_type']] == 'Primary Blood Derived Cancer - Peripheral Blood',
np.logical_or(covars[['sample_type']] == 'Additional Metastatic',
np.logical_or(covars[['sample_type']] == 'Recurrent Tumor',
np.logical_or(covars[['sample_type']] == 'Additional - New Primary',
np.logical_or(covars[['sample_type']] == 'Metastatic',
covars[['sample_type']] == 'Primary Tumor')))))
stratify_lookup = covars.groupby('submitter_id').label.apply(lambda x: len(x))
covars['stratify'] = covars.submitter_id.apply(lambda x: stratify_lookup[x])
covars = covars[['submitter_id', 'label', 'stratify']]
covars['patient_idxes'] = list(range(covars.shape[0]))
def group_shuffle_split():
for _ in range(n_splits):
train_lookups = []
for num_record, df2 in covars.groupby('stratify'):
train_lookup = df2.groupby('submitter_id').apply(lambda x: True)
# randomly change them to be 0
all_idxes = np.arange(len(train_lookup))
np.random.shuffle(all_idxes)
is_test_idxes = all_idxes[:int(len(train_lookup) * test_split)]
train_lookup[is_test_idxes] = False
train_lookups.append(train_lookup)
train_lookups = pd.concat(train_lookups)
covars['is_train'] = covars.submitter_id.apply(lambda x: train_lookups[x])
train_idxes = covars.patient_idxes[covars.is_train].values
test_idxes = covars.patient_idxes[~covars.is_train].values
yield train_idxes, test_idxes
y = covars['label'].astype(float)
stratify = covars['stratify']
dataset = {
'problem': 'classification',
'full': {
'X': x_df,
'y': y,
'ss': group_shuffle_split(),
},
'd_name': 'TCGA-cosmic' if cosmic else 'TCGA-full',
}
return dataset
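# Usage sketch (illustrative): the TCGA loader exposes patient-grouped train/test splits
# as a generator under dataset['full']['ss']; each draw yields integer row indexes that
# keep all samples from a given submitter_id on the same side of the split.
# tcga = load_TCGA_data(n_splits=2)
# for train_idx, test_idx in tcga['full']['ss']:
#     X_tr, y_tr = tcga['full']['X'].iloc[train_idx], tcga['full']['y'].iloc[train_idx]
#     X_te, y_te = tcga['full']['X'].iloc[test_idx], tcga['full']['y'].iloc[test_idx]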
def load_support2cls2_data():
# http://biostat.mc.vanderbilt.edu/wiki/Main/DataSets
df = pd.read_csv('./datasets/support2/support2.csv')
one_hot_encode_cols = ['sex', 'dzclass', 'race' , 'ca', 'income']
target_variables = ['hospdead']
remove_features = ['death', 'slos', 'd.time', 'dzgroup', 'charges', 'totcst',
'totmcst', 'aps', 'sps', 'surv2m', 'surv6m', 'prg2m', 'prg6m',
'dnr', 'dnrday', 'avtisst', 'sfdm2']
df = df.drop(remove_features, axis=1)
rest_colmns = [c for c in df.columns if c not in (one_hot_encode_cols + target_variables)]
# Impute the missing values for 0.
df[rest_colmns] = df[rest_colmns].fillna(0.)
# df = pd.get_dummies(df)
df, onehot_columns = handle_categorical_feat(df)
    df.loc[df['income'].isna(), 'income'] = 'NaN'
    df.loc[df['income'] == 'under $11k', 'income'] = ' <$11k'
    df.loc[df['race'].isna(), 'race'] = 'NaN'
X_df = df.drop(target_variables, axis=1)
y_df = df[target_variables[0]]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'support2cls2',
'search_lam': np.logspace(1.5, 4, 15),
'onehot_columns': onehot_columns,
}
return dataset
def load_onlinenewscls_data():
dataset = load_onlinenews_data()
y_df = dataset['full']['y']
y_df[y_df < 1400] = 0
y_df[y_df >= 1400] = 1
X_df = dataset['full']['X']
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'onlinenewscls',
'search_lam': np.logspace(0, 4, 15),
}
return dataset
def load_compass_data():
df = pd.read_csv('./datasets/recid.csv', delimiter=',')
target_variables = ['two_year_recid']
# df = pd.get_dummies(df, prefix=one_hot_encode_cols)
df, onehot_columns = handle_categorical_feat(df)
X_df = df.drop(target_variables, axis=1)
# X_df = X_df.astype('float64')
y_df = df[target_variables[0]]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'compass',
'onehot_columns': onehot_columns,
}
return dataset
def load_compas2_data():
df = pd.read_csv('./datasets/recid_score.csv', delimiter=',')
target_variables = ['decile_score']
# df = pd.get_dummies(df, prefix=one_hot_encode_cols)
df, onehot_columns = handle_categorical_feat(df)
X_df = df.drop(target_variables, axis=1)
y_df = df[target_variables[0]]
dataset = {
'problem': 'regression',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'compas2',
'onehot_columns': onehot_columns,
}
return dataset
def load_gcredit_data():
''' Load German Credit dataset '''
df = pd.read_csv('./datasets/german_credit/credit.data', delimiter='\t', header=None)
df.columns = [
'checking_balance', 'months_loan_duration', 'credit_history', 'purpose', 'amount',
'savings_balance', 'employment_length', 'installment_rate', 'personal_status',
'other_debtors', 'residence_history', 'property', 'age', 'installment_plan', 'housing',
'existing_credits', 'dependents', 'telephone', 'foreign_worker', 'job', 'class']
target_variables = ['class']
# df, onehot_columns = handle_categorical_feat(df)
X_df = df.drop(target_variables, axis=1)
y_df = df[target_variables[0]]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'gcredit',
}
return dataset
''' =================== Regression Datasets ==================== '''
def load_bikeshare_data():
df = pd.read_csv('./datasets/bikeshare/hour.csv').set_index('instant')
train_cols = ['season', 'yr', 'mnth', 'hr', 'holiday', 'weekday',
'workingday', 'weathersit', 'temp', 'atemp', 'hum', 'windspeed']
label = 'cnt'
X_df = df[train_cols]
y_df = df[label]
dataset = {
'problem': 'regression',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'bikeshare',
'search_lam': np.logspace(-0.5, 2, 15),
}
return dataset
def load_calhousing_data():
X, y = fetch_california_housing(data_home='./datasets/', download_if_missing=True, return_X_y=True)
columns = ['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup', 'Latitude', 'Longitude']
dataset = {
'problem': 'regression',
'full': {
'X': pd.DataFrame(X, columns=columns),
'y': | pd.Series(y) | pandas.Series |
'''
Pull movie metadata from `The Open Movie Database` API
for films that were scraped from `The Numbers` website
'''
import time
import json
import requests
import pandas as pd
from src.my_aws import S3
KEY_OMDB = 'OMDB_API.csv'
KEY_NUM = 'TheNumbers_budgets.csv'
BUCKET = 'movie-torrents'
class OMDBApi():
'''
Pull movie metadata from `The Open Movie Database` API
for films that were scraped from `The Numbers` website
'''
def __init__(self):
'''
Get an S3 connection object, set website base address,
and get the numbers data to create the query parameters
'''
self.s3_connect = S3()
self.base_address = 'http://www.omdbapi.com'
self.the_numbers_data = self.get_thenumbers_data()
self.query_params = self.get_query_params()
self.api_data = None
def get_thenumbers_data(self):
'''
Pull down movie data previously scraped from `The Numbers`
website from S3 storage
Args:
none
Returns:
pd.dataframe: Dataframe of data from `The Numbers` website
'''
the_numbers_data = self.s3_connect.get_data(KEY_NUM, BUCKET)
return the_numbers_data
def get_query_params(self):
'''
Create a list of query tuple parameters to be used for
querying the OMDB API
Args:
none
Returns:
            List(tuples): List of query value tuples for OMDB API
'''
numbers_title = self.the_numbers_data['title']
numbers_year = [year[:4]
for year in self.the_numbers_data['release_date']]
query_params = [(title, year)
for title, year in zip(numbers_title, numbers_year)]
return query_params
def get_omdb_metadata(self, omdb_api_key):
'''
Poll OMDB API to get metadata for the movie title and year provided
from the scraped `The Numbers` data
Args:
omdb_api_key (str): API key required to use OMDB API
Returns:
pd.dataframe: Pandas dataframe of data with OMDB metadata appended
'''
polled_records = []
for title, year in self.query_params:
time.sleep(0.5)
payload = {'t': title, 'y': year, 'apikey': omdb_api_key}
            html = requests.get(self.base_address, params=payload)
            if html.status_code != 200:
                continue
            resp = json.loads(html.text)
            if 'Error' in resp:
                continue
            polled_records.append(resp)
api_data = | pd.DataFrame.from_dict(polled_records, orient='columns') | pandas.DataFrame.from_dict |
#!/opt/conda/bin/python
import click
import os
import os.path as op
import pandas as pd
from sklearn.ensemble import VotingClassifier
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBClassifier
def get_voting_classifier():
dir_path = os.path.dirname(os.path.realpath(__file__))
xgb_model_dir = op.join(dir_path, "saved_models")
models = {}
xgb_model_files = sorted(
[fname for fname in os.listdir(xgb_model_dir) if fname.endswith(".json")]
)
for fname in xgb_model_files:
xgb = XGBClassifier()
xgb.load_model(op.join(xgb_model_dir, fname))
cv_idx = int(fname.split(".json")[0][-1])
models[cv_idx] = xgb
weights = {
0: 0.9439393939393939,
1: 0.9237373737373737,
2: 0.8603174603174604,
3: 0.8818181818181818,
4: 0.907070707070707,
5: 0.9312169312169312,
}
estimators = {f"cv{i}": models[i] for i in weights.keys()}
voter = VotingClassifier(
estimators=estimators, weights=list(weights.values()), voting="soft"
)
voter.estimators_ = list(estimators.values())
voter.le_ = LabelEncoder().fit([0, 1])
voter.classes_ = voter.le_.classes_
return voter
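# Usage sketch (illustrative, assumes the saved XGBoost model files are present in
# saved_models/): the soft-voting ensemble exposes the usual sklearn interface, so
# class-1 probabilities come from predict_proba on a frame with the expected QC columns.
# voter = get_voting_classifier()
# proba = voter.predict_proba(qc_features_df)[:, 1]  # qc_features_df is hypothetical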
def predict_ratings(input_df):
expected_columns = [
"raw_dimension_x",
"raw_dimension_y",
"raw_dimension_z",
"raw_voxel_size_x",
"raw_voxel_size_y",
"raw_voxel_size_z",
"raw_max_b",
"raw_neighbor_corr",
"raw_num_bad_slices",
"raw_num_directions",
"raw_coherence_index",
"raw_incoherence_index",
"t1_dimension_x",
"t1_dimension_y",
"t1_dimension_z",
"t1_voxel_size_x",
"t1_voxel_size_y",
"t1_voxel_size_z",
"t1_max_b",
"t1_neighbor_corr",
"t1_num_bad_slices",
"t1_num_directions",
"t1_coherence_index",
"t1_incoherence_index",
"mean_fd",
"max_fd",
"max_rotation",
"max_translation",
"max_rel_rotation",
"max_rel_translation",
"t1_dice_distance",
]
    try:
        df_qc = input_df[expected_columns]
    except KeyError:
        raise ValueError(
            "Columns in input file do not match expected columns. "
            f"Expected: {expected_columns}, "
            f"but got: {input_df.columns.tolist()}"
        )
voter = get_voting_classifier()
ratings = voter.predict_proba(df_qc)[:, 1]
df_ratings = | pd.DataFrame(index=df_qc.index) | pandas.DataFrame |
# being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.common import AbstractMethodError
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if isinstance(colors, compat.string_types):
import matplotlib.colors
conv = matplotlib.colors.ColorConverter()
def _maybe_valid_colors(colors):
try:
[conv.to_rgba(c) for c in colors]
return True
except ValueError:
return False
# check whether the string can be convertable to single color
maybe_single_color = _maybe_valid_colors([colors])
# check whether each character can be convertable to colors
maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
msg = ("'{0}' can be parsed as both single color and "
"color cycle. Specify each color using a list "
"like ['{0}'] or {1}")
raise ValueError(msg.format(colors, list(colors)))
elif maybe_single_color:
colors = [colors]
else:
# ``colors`` is regarded as color cycle.
# mpl will raise error any of them is invalid
pass
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
format that makes it easy to breakdown into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
        boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
            if j != 0:
ax.yaxis.set_visible(False)
if i != n-1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _gca():
import matplotlib.pyplot as plt
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
-----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=com.pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
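# Usage sketch (illustrative, mirrors the parallel_coordinates example below): RadViz
# projects each normalized row onto a unit circle whose anchor points are the columns.
# from pandas import read_csv
# df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
# radviz(df, 'Name')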
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Parameters:
-----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns:
--------
ax: Matplotlib axis object
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
harmonic = 1.0
for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):
result += (x_even * sin(harmonic * x) +
x_odd * cos(harmonic * x))
harmonic += 1.0
if len(amplitudes) % 2 != 0:
result += amplitudes[-1] * sin(harmonic * x)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = [f(t) for t in x]
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
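# Usage sketch (illustrative): each row is rendered as a finite Fourier series coloured
# by class, so rows from the same class trace similar curves.
# from pandas import read_csv
# df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
# andrews_curves(df, 'Name')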
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
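# Usage sketch (illustrative): bootstrap_plot resamples the series `samples` times with
# `size` points per draw and shows the sampling spread of the mean, median and midrange.
# s = Series(np.random.rand(1000))
# bootstrap_plot(s, size=50, samples=500, color='grey')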
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, **kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, linewidth=1, color='black')
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters:
-----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + %s)" % lag)
ax.scatter(y1, y2, **kwds)
return ax
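# Usage sketch (illustrative): a lag plot of serially dependent data (e.g. a random walk)
# clusters along the diagonal, while independent noise fills the plane.
# s = Series(np.random.randn(500).cumsum())
# lag_plot(s, lag=1)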
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
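# Usage sketch (illustrative): for white noise the autocorrelations should stay inside
# the dashed 99% and solid 95% bands drawn around zero.
# s = Series(np.random.randn(500))
# autocorrelation_plot(s)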
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the users should do the visibility setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
if isinstance(self._default_rot, dict):
self.rot = self._default_rot[self.kind]
else:
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else self.plt.rcParams['axes.grid']
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
            warnings.warn(("'colors' is being deprecated. Please use 'color' "
                           "instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if com.is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError("Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
if self.sort_columns:
columns = com._try_sort(data.columns)
else:
columns = data.columns
for col in columns:
if keep_index is True:
yield col, data[col]
else:
yield col, data[col].values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._post_plot_logic()
self._adorn_subplots()
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
            # if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, 'left_ax'):
# if it has left_ax proparty, ``ax`` must be right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not com.is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (com.is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, Series):
label = self.label
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
numeric_data = data.convert_objects()._get_numeric_data()
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic(self):
pass
def _adorn_subplots(self):
to_adorn = self.axes
if len(self.axes) > 0:
all_axes = self._get_axes()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in to_adorn:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.axes[0].set_title(self.title)
labels = [com.pprint_thing(key) for key in self.data.index]
labels = dict(zip(range(len(self.data.index)), labels))
for ax in self.axes:
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = com.pprint_thing(name)
return name
else:
stringified = map(com.pprint_thing,
self.data.columns.names)
return ','.join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if not label is None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
if not leg is None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if not self.legend_title is None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'left_ax', None) or
getattr(ax, 'right_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.order())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
def _is_datetype(self):
index = self.data.index
return (isinstance(index, (PeriodIndex, DatetimeIndex)) or
index.inferred_type in ('datetime', 'date', 'datetime64',
'time'))
def _get_plot_function(self):
'''
Returns the matplotlib plotting function (plot or errorbar) based on
the presence of errorbar keywords.
'''
errorbar = any(e is not None for e in self.errors.values())
def plotf(ax, x, y, style=None, **kwds):
mask = com.isnull(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if errorbar:
return self.plt.Axes.errorbar(ax, x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is unsupported
if style is not None:
args = (ax, x, y, style)
else:
args = (ax, x, y)
return self.plt.Axes.plot(*args, **kwds)
return plotf
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if any(x is not None for x in name):
name = ','.join([com.pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = com.pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
return self.data.columns[i] in self.secondary_y
def _get_style(self, i, col_name):
style = ''
if self.subplots:
style = 'k'
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[i]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(col_name, style)
else:
style = self.style
return style or None
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _maybe_add_color(self, colors, kwds, style, i):
has_color = 'color' in kwds or self.colormap is not None
if has_color and (style is None or re.match('[a-z]+', style) is None):
kwds['color'] = colors[i % len(colors)]
def _parse_errorbars(self, label, err):
'''
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
'''
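# Illustrative examples (not from the original source) of the accepted forms,
# assuming ``df`` has columns 'a' and 'b':
#   df.plot(yerr=0.04)                           # scalar, broadcast to every point
#   df.plot(yerr=np.random.rand(len(df)) / 50)   # array-like, one value per row
#   df.plot(yerr={'a': 0.02, 'b': 0.03})         # dict keyed by plotted column
#   df.plot(yerr='b')                            # name of a column holding the errors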
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex_axis(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif com.is_list_like(err):
if com.is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif com.is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
return err
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_axes(self):
return self.axes[0].get_figure().get_axes()
def _get_axes_layout(self):
axes = self._get_axes()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class ScatterPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, c=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError('scatter requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.x = x
self.y = y
self.c = c
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib as mpl
mpl_ge_1_3_1 = str(mpl.__version__) >= LooseVersion('1.3.1')
import matplotlib.pyplot as plt
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = com.is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = plt.cm.get_cmap(cmap)
if c is None:
c_values = self.plt.rcParams['patch.facecolor']
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if mpl_ge_1_3_1:
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class HexBinPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, C=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError('hexbin requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.x = x
self.y = y
self.C = C
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib.pyplot as plt
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _make_legend(self):
pass
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class LinePlot(MPLPlot):
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _index_freq(self):
freq = getattr(self.data.index, 'freq', None)
if freq is None:
freq = getattr(self.data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(self.data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _is_dynamic_freq(self, freq):
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq is not None and self._no_base(freq)
def _no_base(self, freq):
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(self.data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = self.data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _use_dynamic_x(self):
freq = self._index_freq()
ax = self._get_ax(0)
ax_freq = getattr(ax, 'freq', None)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
return (freq is not None) and self._is_dynamic_freq(freq)
def _is_ts_plot(self):
# this is slightly deceptive
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _make_plot(self):
self._initialize_prior(len(self.data))
if self._is_ts_plot():
data = self._maybe_convert_index(self.data)
x = data.index # dummy, not used
plotf = self._get_ts_plot_function()
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._get_plot_function()
it = self._iter_data()
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
style = self._get_style(i, label)
kwds = self.kwds.copy()
self._maybe_add_color(colors, kwds, style, i)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i, **kwds)
self._add_legend_handle(newlines[0], label, index=i)
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
def _get_stacked_values(self, y, label):
if self.stacked:
if (y >= 0).all():
return self._pos_prior + y
elif (y <= 0).all():
return self._neg_prior + y
else:
raise ValueError('When stacked is True, each column must be either all positive or all negative. '
'{0} contains both positive and negative values'.format(label))
else:
return y
def _get_plot_function(self):
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
# column_num is used to get the target column from plotf in line and area plots
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
self._update_prior(y)
return lines
return plotf
def _get_ts_plot_function(self):
from pandas.tseries.plotting import tsplot
plotf = self._get_plot_function()
def _plot(ax, x, data, style=None, **kwds):
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
lines = tsplot(data, plotf, ax=ax, style=style, **kwds)
return lines
return _plot
def _initialize_prior(self, n):
self._pos_prior = np.zeros(n)
self._neg_prior = np.zeros(n)
def _update_prior(self, y):
if self.stacked and not self.subplots:
# tsplot resampling may change the data length
if len(self._pos_prior) != len(y):
self._initialize_prior(len(y))
if (y >= 0).all():
self._pos_prior += y
elif (y <= 0).all():
self._neg_prior += y
def _maybe_convert_index(self, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
ax = self._get_ax(0)
freq = getattr(ax, 'freq', None)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data.index = data.index.to_period(freq=freq)
return data
def _post_plot_logic(self):
df = self.data
condition = (not self._use_dynamic_x()
and df.index.is_all_dates
and not self.subplots
or (self.subplots and self.sharex))
index_name = self._get_index_name()
for ax in self.axes:
if condition:
# irregular TS rotated 30 deg. by default
# probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
def _get_plot_function(self):
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
else:
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
if (y >= 0).all():
start = self._pos_prior
elif (y <= 0).all():
start = self._neg_prior
else:
start = np.zeros(len(y))
if not 'color' in kwds:
kwds['color'] = lines[0].get_color()
self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds)
self._update_prior(y)
return lines
return plotf
def _add_legend_handle(self, handle, label, index=None):
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
def _post_plot_logic(self):
LinePlot._post_plot_logic(self)
if self.ylim is None:
if (self.data >= 0).all().all():
for ax in self.axes:
ax.set_ylim(0, None)
elif (self.data <= 0).all().all():
for ax in self.axes:
ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
_default_rot = {'bar': 90, 'barh': 0}
def __init__(self, data, **kwargs):
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
self.log = kwargs.pop('log',False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs['align'] == 'edge':
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs['align'] == 'edge':
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
def _args_adjust(self):
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if com.is_list_like(self.left):
self.left = np.array(self.left)
def _get_plot_function(self):
if self.kind == 'bar':
def f(ax, x, y, w, start=None, **kwds):
start = start + self.bottom
return ax.bar(x, y, w, bottom=start, log=self.log, **kwds)
elif self.kind == 'barh':
def f(ax, x, y, w, start=None, log=self.log, **kwds):
start = start + self.left
return ax.barh(x, y, w, left=start, log=self.log, **kwds)
else:
raise ValueError("BarPlot kind must be either 'bar' or 'barh'")
return f
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
bar_f = self._get_plot_function()
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label)
if (('yerr' in kwds) or ('xerr' in kwds)) \
and (kwds.get('ecolor') is None):
kwds['ecolor'] = mpl.rcParams['xtick.color']
start = 0
if self.log and (y >= 1).all():
start = 1
if self.subplots:
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior)
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = bar_f(ax, self.ax_pos + (i + 0.5) * w, y, w,
start=start, label=label, **kwds)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self):
for ax in self.axes:
if self.use_index:
str_index = [com.pprint_thing(key) for key in self.data.index]
else:
str_index = [com.pprint_thing(key) for key in
range(self.data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
if self.kind == 'bar':
ax.set_xlim((s_edge, e_edge))
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(str_index)
if name is not None and self.use_index:
ax.set_xlabel(name)
elif self.kind == 'barh':
# horizontal bars
ax.set_ylim((s_edge, e_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(str_index)
if name is not None and self.use_index:
ax.set_ylabel(name)
else:
raise NotImplementedError(self.kind)
@property
def orientation(self):
if self.kind == 'bar':
return 'vertical'
elif self.kind == 'barh':
return 'horizontal'
else:
raise NotImplementedError(self.kind)
class HistPlot(LinePlot):
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if com.is_integer(self.bins):
# create common bin edge
values = self.data.convert_objects()._get_numeric_data()
values = np.ravel(values)
values = values[~com.isnull(values)]
hist, self.bins = np.histogram(values, bins=self.bins,
range=self.kwds.get('range', None),
weights=self.kwds.get('weights', None))
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _get_plot_function(self):
def plotf(ax, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.bins) - 1)
y = y[~com.isnull(y)]
bottom = self._pos_prior + self.bottom
# ignore style
n, bins, patches = self.plt.Axes.hist(ax, y, bins=self.bins,
bottom=bottom, **kwds)
self._update_prior(n)
return patches
return plotf
def _make_plot(self):
plotf = self._get_plot_function()
colors = self._get_colors()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
style = self._get_style(i, label)
label = com.pprint_thing(label)
kwds = self.kwds.copy()
kwds['label'] = label
self._maybe_add_color(colors, kwds, style, i)
if style is not None:
kwds['style'] = style
artists = plotf(ax, y, column_num=i, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _post_plot_logic(self):
if self.orientation == 'horizontal':
for ax in self.axes:
ax.set_xlabel('Frequency')
else:
for ax in self.axes:
ax.set_ylabel('Frequency')
@property
def orientation(self):
if self.kwds.get('orientation', None) == 'horizontal':
return 'horizontal'
else:
return 'vertical'
class KdePlot(HistPlot):
orientation = 'vertical'
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
def _args_adjust(self):
pass
def _get_ind(self, y):
if self.ind is None:
sample_range = max(y) - min(y)
ind = np.linspace(min(y) - 0.5 * sample_range,
max(y) + 0.5 * sample_range, 1000)
else:
ind = self.ind
return ind
def _get_plot_function(self):
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
f = MPLPlot._get_plot_function(self)
def plotf(ax, y, style=None, column_num=None, **kwds):
y = remove_na(y)
if LooseVersion(spv) >= '0.11.0':
gkde = gaussian_kde(y, bw_method=self.bw_method)
else:
gkde = gaussian_kde(y)
if self.bw_method is not None:
msg = ('bw_method was added in Scipy 0.11.0.' +
' Scipy version in use is %s.' % spv)
warnings.warn(msg)
ind = self._get_ind(y)
y = gkde.evaluate(ind)
lines = f(ax, ind, y, style=style, **kwds)
return lines
return plotf
def _post_plot_logic(self):
for ax in self.axes:
ax.set_ylabel('Density')
class PiePlot(MPLPlot):
_layout_type = 'horizontal'
def __init__(self, data, kind=None, **kwargs):
data = data.fillna(value=0)
if (data < 0).any().any():
raise ValueError("{0} doesn't allow negative values".format(kind))
MPLPlot.__init__(self, data, kind=kind, **kwargs)
def _args_adjust(self):
self.grid = False
self.logy = False
self.logx = False
self.loglog = False
def _validate_color_args(self):
pass
def _make_plot(self):
self.kwds.setdefault('colors', self._get_colors(num_colors=len(self.data),
color_kwds='colors'))
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
if label is not None:
label = com.pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ''
else:
return label
idx = [com.pprint_thing(v) for v in self.data.index]
labels = kwds.pop('labels', idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [blank_labeler(label, value) for
label, value in zip(labels, y)]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get('autopct', None) is not None:
patches, texts, autotexts = results
else:
patches, texts = results
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for p, l in zip(patches, leglabels):
self._add_legend_handle(p, l)
class BoxPlot(LinePlot):
_layout_type = 'horizontal'
_valid_return_types = (None, 'axes', 'dict', 'both')
# namedtuple to hold results
BP = namedtuple("Boxplot", ['ax', 'lines'])
def __init__(self, data, return_type=None, **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
# Disable label ax sharing. Otherwise, all subplots show the last column's label
if self.orientation == 'vertical':
self.sharex = False
else:
self.sharey = False
def _get_plot_function(self):
def plotf(ax, y, column_num=None, **kwds):
if y.ndim == 2:
y = [remove_na(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na(y)
bp = ax.boxplot(y, **kwds)
if self.return_type == 'dict':
return bp, bp
elif self.return_type == 'both':
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
return plotf
def _validate_color_args(self):
if 'color' in self.kwds:
if self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
self.color = self.kwds.pop('color')
if isinstance(self.color, dict):
valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
for key, values in compat.iteritems(self.color):
if key not in valid_keys:
raise ValueError("color dict contains invalid key '{0}' "
"The key must be either {1}".format(key, valid_keys))
else:
self.color = None
# get standard colors for default
colors = _get_standard_colors(num_colors=3,
colormap=self.colormap,
color=None)
# use 2 colors by default, for box/whisker and median
# flier colors isn't needed here
# because it can be specified by ``sym`` kw
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = 'k' # mpl default
def _get_colors(self, num_colors=None, color_kwds='color'):
pass
def maybe_color_bp(self, bp):
if isinstance(self.color, dict):
boxes = self.color.get('boxes', self._boxes_c)
whiskers = self.color.get('whiskers', self._whiskers_c)
medians = self.color.get('medians', self._medians_c)
caps = self.color.get('caps', self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
from matplotlib.artist import setp
setp(bp['boxes'], color=boxes, alpha=1)
setp(bp['whiskers'], color=whiskers, alpha=1)
setp(bp['medians'], color=medians, alpha=1)
setp(bp['caps'], color=caps, alpha=1)
def _make_plot(self):
plotf = self._get_plot_function()
if self.subplots:
self._return_obj = compat.OrderedDict()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
ret, bp = plotf(ax, y, column_num=i, **kwds)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [ | com.pprint_thing(label) | pandas.core.common.pprint_thing |
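# Illustrative note (assumption, not part of the original row): the return_type
# handling above mirrors the public boxplot API, e.g.
#     bp = df.boxplot(return_type='dict')   # dict of matplotlib boxplot artists
#     res = df.boxplot(return_type='both')  # namedtuple of (ax, lines)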
import os
import itertools
import numpy as np
import pandas as pd
import xarray as xr
from pylab import *
import torch
from torch.autograd import Variable
"""
Functions for use with Pytorch.
This module contains utility functions for the S2S machine learning project.
Author: <NAME>, NCAR (<EMAIL>)
"""
def pacific_lon(array):
"""
Help converting pacific 360 longitudes to 180.
Args:
array: longitude array.
Returns:
converted longitudes array.
"""
return xr.where(array > 180,
array - 360,
array)
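# Illustrative check (not in the original module): 190 and 350 map to -170 and
# -10, while values already in [-180, 180] pass through unchanged, e.g.
#     pacific_lon(xr.DataArray([10.0, 190.0, 350.0]))  # -> [10.0, -170.0, -10.0]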
def compute_lat_weights(ds):
"""
Computation of weights for latitude/longitude grid mean.
Weights are cosine of the latitude.
Args:
ds: xarray dataset.
Returns:
latitude weights.
"""
weights = np.cos(np.deg2rad(ds.lat))
_, weights = xr.broadcast(ds, weights)
weights = weights.isel(time=0)
return weights
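# Illustrative usage (assumption, not from the original module): the cosine
# weights returned here are typically consumed in an area-weighted spatial mean,
# for example
#     w = compute_lat_weights(ds)
#     spatial_mean = (ds * w).sum(dim=['lat', 'lon']) / w.sum(dim=['lat', 'lon'])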
def matlab_to_python_time(ds):
"""
Conversion of matlab time to python time (human understandable).
Args:
ds: xarray dataset.
Returns:
pandas datetime timestamps.
"""
datenums = ds.coords['time'].values
timestamps = | pd.to_datetime(datenums-719529, unit='D') | pandas.to_datetime |
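# Note (added, not in the original module): 719529 is the MATLAB datenum for
# 1970-01-01, so subtracting it leaves days since the Unix epoch, which
# pd.to_datetime understands directly, e.g.
#     pd.to_datetime(719529.5 - 719529, unit='D')  # Timestamp('1970-01-01 12:00:00')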
from typing import List
from decimal import Decimal
from yayFinPy.stock import Stock
import pandas as pd
def test_constructor():
try:
stock = Stock("AAPL")
assert(stock != None)
return 1
except Exception as e:
print("Test Failed: test_constructor: ", e)
return 0
def test_constructor_failure():
try:
stock = Stock("INVALID")
except:
return 1
print("Test Failed: test_constructor_failure")
return 0
def test_stock_attributes():
try:
stock = Stock("AAPL")
assert(stock != None)
assert(type(stock.bid) == Decimal)
assert(type(stock.ask) == Decimal)
assert(type(stock.bid_size) == Decimal)
assert(type(stock.ask_size) == Decimal)
assert(type(stock.name) == str)
assert(type(stock.pe_ratio) == Decimal)
assert(type(stock.peg_ratio) == Decimal)
assert(type(stock.market_cap) == Decimal)
assert(stock.name == "Apple Inc.")
return 1
except Exception as e:
print("Test Failed: test_stock_attributes", e)
return 0
def test_stock_splits():
try:
stock = Stock("AAPL")
splits = stock.splits
assert(type(splits) == type(pd.Series(dtype='float64')))
return 1
except Exception as e:
print("Test Failed: test_stock_splits", e)
return 0
def test_stock_dividends():
try:
stock = Stock("AAPL")
dividends = stock.dividends
assert(type(dividends) == type( | pd.Series(dtype='float64') | pandas.Series |
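# Suggested (hedged) tightening, not part of the original test: comparing
# against a freshly constructed Series only checks the container type, so an
# isinstance check is equivalent and cheaper:
#     assert isinstance(dividends, pd.Series)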
import pandas as pd
from datetime import datetime
import time
import matplotlib.pyplot as plt
from sklearn.preprocessing import Normalizer,MinMaxScaler
from imblearn.over_sampling import SMOTE
from sklearn.utils import shuffle
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import classification_report
import numpy as np
import seaborn as sns
import plotly.graph_objs as go
import plotly.plotly as py
import plotly
from sklearn.externals import joblib
import warnings
warnings.filterwarnings("ignore")
#get_ipython().run_line_magic('matplotlib', 'inline')
#fd-future data set
#validating-0 or 1 (0-tetsing ,1= future prediction)
def flood_classifier(filename,fd,validating=0):
data1= | pd.read_excel('data/'+filename+'.xlsx') | pandas.read_excel |
def initcap(m):
return m[:1].upper()+m[1:].lower()
def instr(a,b):
ls=[]
for i in range(len(a)):
ls.append(a[i])
if ls[i]==b:
return i+1
return 0
def connect(data) :
import cx_Oracle
import pandas as pd
dsn = cx_Oracle.makedsn('localhost',1521,'orcl') # Oracle server address info (host, port, SID)
db = cx_Oracle.connect('scott','tiger',dsn) # Oracle login info (user, password, DSN)
cursor = db.cursor() # declare a cursor to hold the query results
cursor.execute("""select * from %s""" %data) # run the SQL query; the result set is held by the cursor
row = cursor.fetchall() # fetch the rows held by the cursor
colname = cursor.description # grab the column names of the SELECTed table
cursor.close() # close the cursor
col = [] # create a list
for i in colname :
col.append(i[0]) # fill in the table column names
select_data = pd.DataFrame(row) # turn the data into a table (DataFrame)
select_data = | pd.DataFrame(row,columns=col) | pandas.DataFrame |
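# Illustrative alternative (assumption, not in the original source): pandas can
# build the frame straight from the DBAPI connection and keep the column names,
# e.g.
#     select_data = pd.read_sql("select * from %s" % data, con=db)
# (pandas officially documents SQLAlchemy connectables and sqlite3 here; other
# DBAPI connections such as cx_Oracle's usually work but are not guaranteed.)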
#!/usr/bin/env python
"""
Parses SPINS' EA log files into BIDS tsvs.
Usage:
dm_parse_ea.py [options] <study>
Arguments:
<study> A datman study to parse task data for.
Options:
--experiment <experiment> Single datman session to generate TSVs for
--timings <timing_path> The full path to the EA timings file.
Defaults to the 'EA-timing.csv' file in
the assets folder.
--lengths <lengths_path> The full path to the file containing the
EA vid lengths. Defaults to the
'EA-vid-lengths.csv' in the assets folder.
--regex <regex> The regex to use to find the log files to
parse. [default: *UCLAEmpAcc*]
--debug Set log level to debug
"""
import re
import os
import glob
import logging
import pandas as pd
import numpy as np
from docopt import docopt
import datman.config
import datman.scanid
logging.basicConfig(
level=logging.WARN, format="[%(name)s] %(levelname)s: %(message)s"
)
logger = logging.getLogger(os.path.basename(__file__))
# reads in log file and subtracts the initial TRs/MRI startup time
def read_in_logfile(path):
log_file = pd.read_csv(path, sep="\t", skiprows=3)
time_to_subtract = int(log_file.Duration[log_file.Code == "MRI_start"])
log_file.Time = log_file.Time - time_to_subtract
return log_file
# Remove the rating when there is a scanner response during the task instead of just at the start
def clean_logfile(log_file):
scan_response = ["101", "104"]
# 1st list of indexes to remove scan responses and ratings in the dataframe
indexes_to_drop = []
# Remove ratings that come after the scan response when there is a 102/103 response right before or after
# Also remove ratings that come after a scan response and carry over to the next video
# The rating is always registered two indexes after the scan response
for index, row in log_file.iterrows():
if ("rating" in log_file["Code"][index]) and any(
resp in log_file["Code"][index - 2] for resp in scan_response
):
# index to select the rating to drop
indexes_to_drop.append(index)
# index - 2 to select the scan response to drop
indexes_to_drop.append(index - 2)
if len(indexes_to_drop) == 0:
log_file_cleaned = log_file
else:
log_file_cleaned = log_file.drop(log_file.index[indexes_to_drop])
log_file_cleaned = log_file_cleaned.reset_index(drop=True)
logger.warning(
f"Removed {len(indexes_to_drop)/2} registered rating occurred before or after actual rating"
)
# 2nd list of indexes to drop the remaining scan responses and ratings
indexes_to_drop_1 = []
# Remove the remaining rating responses that come right after a scan response
# The rating is registered one index after the scan response
for index, row in log_file_cleaned.iterrows():
if ("rating" in log_file_cleaned["Code"][index]) and any(
resp in log_file_cleaned["Code"][index - 1]
for resp in scan_response
):
# index to select the remaining rating to drop
indexes_to_drop_1.append(index)
# index - 1 select the remaing scan response to drop
indexes_to_drop_1.append(index - 1)
if len(indexes_to_drop_1) == 0:
final_log_file = log_file_cleaned
else:
final_log_file = log_file_cleaned.drop(
log_file_cleaned.index[indexes_to_drop_1]
)
final_log_file = final_log_file.reset_index(drop=True)
logger.warning(
f"Removed {len(indexes_to_drop_1)/2} rating registered followed scanner responses"
)
return final_log_file
# Grabs the starts of blocks and returns rows for them
def get_blocks(log, vid_info):
# identifies the video trial types (as opposed to button press events etc)
mask = ["vid" in log["Code"][i] for i in range(0, log.shape[0])]
df = pd.DataFrame(
{
"onset": log.loc[mask]["Time"],
"trial_type": log.loc[mask]["Event Type"],
"movie_name": log.loc[mask]["Code"],
}
)
df["trial_type"] = df["movie_name"].apply(
lambda x: "circle_block" if "cvid" in x else "EA_block"
)
df["duration"] = df["movie_name"].apply(
lambda x: int(vid_info[x]["duration"]) * 10000
if x in vid_info
else pd.NA
)
df["stim_file"] = df["movie_name"].apply(
lambda x: vid_info[x]["stim_file"] if x in vid_info else pd.NA
)
df["end"] = df["onset"] + df["duration"]
return df
def format_vid_info(vid):
vid.columns = [c.lower() for c in vid.columns]
vid = vid.rename(index={0: "stim_file", 1: "duration"})
vid = vid.to_dict()
return vid
def read_in_standard(timing_path):
df = pd.read_csv(timing_path).astype(str)
df.columns = [c.lower() for c in df.columns]
df_dict = df.drop([0, 0]).reset_index(drop=True).to_dict(orient="list")
return df_dict
def get_series_standard(gold_standard, block_name):
return [float(x) for x in gold_standard[block_name] if x != "nan"]
def get_ratings(log):
rating_mask = ["rating" in log["Code"][i] for i in range(0, log.shape[0])]
df = pd.DataFrame(
{
"onset": log["Time"].loc[rating_mask].values,
"participant_value": log.loc[rating_mask]["Code"].values,
"event_type": "button_press",
"duration": 0,
}
)
# Pull rating value from formatted string
df["participant_value"] = df["participant_value"].str.strip().str[-1]
return df
def combine_dfs(blocks, ratings):
# combines the block rows with the ratings rows and sorts them
combo = blocks.append(ratings).sort_values("onset").reset_index(drop=True)
mask = pd.notnull(combo["trial_type"])
combo["space_b4_prev"] = combo["onset"].diff(periods=1)
combo["first_button_press"] = combo["duration"].shift() > 0
combo2 = combo.drop(
combo[
(combo["space_b4_prev"] < 1000)
& (combo["first_button_press"] == True)
].index
).reset_index(drop=True)
mask = pd.notnull(combo2["trial_type"])
block_start_locs = combo2[mask].index.values
last_block = combo2.iloc[block_start_locs[len(block_start_locs) - 1]]
end_row = {
"onset": last_block.end,
"rating_duration": 0,
"event_type": "last_row",
"duration": 0,
"participant_value": last_block.participant_value,
}
combo2 = combo2.append(end_row, ignore_index=True).reset_index(drop=True)
mask = pd.notnull(combo2["trial_type"])
block_start_locs = combo2[mask].index.values
combo2["rating_duration"] = combo2["onset"].shift(-1) - combo2[
"onset"
].where(
mask == False
) # noqa: E712
for i in range(len(block_start_locs)):
if block_start_locs[i] != 0:
combo2.rating_duration[block_start_locs[i - 1]] = (
combo2.end[block_start_locs[i - 1]]
- combo2.onset[block_start_locs[i - 1]]
)
for i in block_start_locs:
new_row = {
"onset": combo2.onset[i],
"rating_duration": combo2.onset[i + 1] - combo2.onset[i],
"event_type": "default_rating",
"duration": 0,
"participant_value": 5,
}
combo2 = combo2.append(new_row, ignore_index=True)
combo2 = combo2.sort_values(
by=["onset", "event_type"], na_position="first"
).reset_index(drop=True)
return combo2
def block_scores(ratings_dict, combo):
"""
Compute Pearson correlation between gold standard ratings
and participant ratings
"""
list_of_rows = []
summary_vals = {}
mask = pd.notnull(combo["trial_type"])
block_start_locs = combo[mask].index.values
block_start_locs = np.append(
block_start_locs, combo.tail(1).index.values, axis=None
)
for idx in range(1, len(block_start_locs)):
block_start = combo.onset[block_start_locs[idx - 1]]
block_end = combo.end[block_start_locs[idx - 1]]
block = combo.iloc[block_start_locs[idx - 1] : block_start_locs[idx]][
pd.notnull(combo.event_type)
]
block_name = (
combo.movie_name.iloc[
block_start_locs[idx - 1] : block_start_locs[idx]
][pd.notnull(combo.movie_name)]
.reset_index(drop=True)
.astype(str)
.get(0)
)
gold = get_series_standard(ratings_dict, block_name)
if "cvid" in block_name:
interval = np.arange(
combo.onset[block_start_locs[idx - 1]],
combo.end[block_start_locs[idx - 1]],
step=40000,
)
else:
interval = np.arange(
combo.onset[block_start_locs[idx - 1]],
combo.end[block_start_locs[idx - 1]],
step=20000,
)
if len(gold) < len(interval):
interval = interval[: len(gold)]
logger.warning(
"gold standard is shorter than the number of pt "
f"ratings. pt ratings truncated, block: {block_name}",
)
if len(interval) < len(gold):
gold = gold[: len(interval)]
logger.warning(
"number of pt ratings is shorter than the number "
f"of gold std, gold std truncated, block: {block_name}",
)
# append the block end so the loop below also covers the remaining
# fraction of a second (this may not be strictly necessary)
interval = np.append(interval, block_end)
two_s_avg = []
for x in range(len(interval) - 1):
start = interval[x]
end = interval[x + 1]
sub_block = block[
block["onset"].between(start, end)
| block["onset"].between(start, end).shift(-1)
]
block_length = end - start
if len(sub_block) != 0:
ratings = []
for index, row in sub_block.iterrows():
if row.onset < start:
numerator = (row.onset + row.rating_duration) - start
else:
if (row.onset + row.rating_duration) <= end:
numerator = row.rating_duration
elif (row.onset + row.rating_duration) > end:
numerator = end - row.onset
else:
numerator = 999999 # add error here
if row.event_type != "last_row":
ratings.append(
{
"start": start,
"end": end,
"row_time": row.rating_duration,
"row_start": row.onset,
"block_length": block_length,
"rating": row.participant_value,
"time_held": numerator,
}
)
nums = [float(d["rating"]) for d in ratings]
times = [
float(d["time_held"]) / block_length
for d in ratings
]
avg = np.sum(np.multiply(nums, times))
last_row = row.participant_value
else:
avg = last_row
two_s_avg.append(float(avg))
list_of_rows.append(
{
"event_type": "running_avg",
"participant_value": float(avg),
"onset": start,
"duration": end - start,
"gold_std": gold[x],
}
)
n_button_press = len(block[block.event_type == "button_press"].index)
block_score = np.corrcoef(gold, two_s_avg)[1][0]
key = str(block_name)
summary_vals.update(
{
key: {
"n_button_press": int(n_button_press),
"block_score": block_score,
"onset": block_start,
"duration": block_end - block_start,
}
}
)
return list_of_rows, summary_vals
def outputs_exist(log_file, output_path):
if not os.path.exists(output_path):
return False
if os.path.getmtime(output_path) < os.path.getmtime(log_file):
logger.error(
"Output file is less recently modified than its task file"
f" {log_file}. Output will be deleted and regenerated."
)
try:
os.remove(output_path)
except Exception as e:
logger.error(
f"Failed to remove output file {output_path}, cannot "
f"regenerate. Reason - {e}"
)
return True
return False
return True
def get_output_path(ident, log_file, dest_dir):
try:
os.makedirs(dest_dir)
except FileExistsError:
pass
part = re.findall(r"((?:part|RUN)\d).log", log_file)
if not part:
logger.error(
f"Can't detect which part task file {log_file} "
"corresponds to. Ignoring file."
)
return
else:
part = part[0]
return os.path.join(dest_dir, f"{ident}_EAtask_{part}.tsv")
def parse_task(ident, log_file, dest_dir, length_file, timing_file):
output_path = get_output_path(ident, log_file, dest_dir)
if outputs_exist(log_file, output_path):
return
# Reads in and clean the log, skipping the first three preamble lines
try:
log = read_in_logfile(log_file)
log_cleaned = clean_logfile(log)
except Exception as e:
logger.error(
f"Cannot parse {log_file}! File maybe corrupted! Skipping"
)
return
vid_in = pd.read_csv(length_file)
vid_info = format_vid_info(vid_in)
blocks = get_blocks(log_cleaned, vid_info)
ratings = get_ratings(log_cleaned)
combo = combine_dfs(blocks, ratings)
ratings_dict = read_in_standard(timing_file)
two_s_chunks, scores = block_scores(ratings_dict, combo)
combo["block_score"] = np.nan
combo["n_button_press"] = np.nan
combo = (
combo.append(two_s_chunks).sort_values("onset").reset_index(drop=True)
)
test = combo.loc[ | pd.notnull(combo["stim_file"]) | pandas.notnull |
#!/usr/bin/env python
import pandas
# From SF/Dataloader export a CSV in this format:
# "Account ID","<NAME>","<NAME>","Consumer Email","Alternate Email","Alternate EMail 2","Alternate EMail 3","Personal Email","Corporate Email","Preferred Email"
contacts = pandas.read_csv('contacts.csv', usecols=['Account ID', 'Consumer Email'])
contacts = contacts.drop_duplicates(subset='Consumer Email')
contacts.rename(columns={'Consumer Email': 'Email'}, inplace=True)
contacts['Email'] = contacts['Email'].str.lower()
# the tw.csv comes this format:
# "Email Address","First Name","<NAME>",user__pk,"subscription starts","subscription ends","Texas Weekly",EMAIL_TYPE,MEMBER_RATING,OPTIN_TIME,OPTIN_IP,CONFIRM_TIME,CONFIRM_IP,LATITUDE,LONGITUDE,GMTOFF,DSTOFF,TIMEZONE,CC,REGION,LAST_CHANGED,LEID,EUID,NOTES
tw = pandas.read_csv('tw.csv', usecols=['Email Address', '<NAME>', '<NAME>', 'EUID'])
tw.rename(columns={'Email Address': 'Email', 'EUID': 'Legacy ID'}, inplace=True)
tw['Email'] = tw['Email'].str.lower()
result = | pandas.merge(tw, contacts, on='Email', how='left') | pandas.merge |
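# Illustrative extension (assumption, not in the original script): merging with
# indicator=True makes it easy to audit subscribers with no Salesforce match:
#     audit = pandas.merge(tw, contacts, on='Email', how='left', indicator=True)
#     unmatched = audit[audit['_merge'] == 'left_only']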
import numpy as np
import pandas as pd
import pyEDM as edm
from sklearn.cross_decomposition import PLSCanonical, CCA
from tqdm import tqdm
from utils.constants import *
def read_corr_mat(filename):
corr_mat = np.array([])
with open(filename, "r") as file_obj:
for line in file_obj.readlines():
line = [float(num) for num in line.split(' ')]
corr_mat = np.append(corr_mat, line)
corr_mat = corr_mat.reshape(TARGET_SIZE, -1)
return corr_mat
def ccm_feat_selection(libsize, k_feat_to_select, devices_data, video_data,
read_result=False, save_result=False, filename=""):
assert (not read_result or not save_result)
if read_result:
corr_mat = read_corr_mat(filename)
return corr_mat.argsort(axis=1)[:, -k_feat_to_select:]
libsize = str(libsize)
corr_mat = np.zeros((video_data.shape[1], devices_data.shape[1]-1))
multi_data = | pd.concat([devices_data, video_data], axis=1, copy=False) | pandas.concat |
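# Note (added): pd.concat with axis=1 aligns on the row index, so devices_data
# and video_data are assumed to share an identical index here; a cheap guard is
#     assert devices_data.index.equals(video_data.index)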
# -*- coding: utf-8 -*-
"""
Created on Mon May 14 17:29:16 2018
@author: jdkern
"""
from __future__ import division
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def exchange(year):
df_data = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/Load_Path_Sim.csv',header=0)
c = ['Path3_sim','Path8_sim','Path14_sim','Path65_sim','Path66_sim']
df_data = df_data[c]
paths = ['Path3','Path8','Path14','Path65','Path66']
df_data.columns = paths
df_data = df_data.loc[year*365:year*365+364,:]
# select dispatchable imports
imports = df_data
imports = imports.reset_index()
for p in paths:
for i in range(0,len(imports)):
if p=='Path3' or p=='Path65' or p=='Path66': #SCRIPT ASSUMPTION: NEGATIVE = EXPORT. revert sign when needed
if imports.loc[i,p] >= 0:
imports.loc[i,p] = 0
else:
imports.loc[i,p] = -imports.loc[i,p]
else:
if imports.loc[i,p] < 0:
imports.loc[i,p] = 0
imports.to_csv('Path_setup/PNW_imports.csv')
# convert to minimum flow time series and dispatchable (daily)
df_mins = pd.read_excel('Path_setup/PNW_imports_minflow_profiles.xlsx',header=0)
lines = ['Path3','Path8','Path14','Path65','Path66']
for i in range(0,len(df_data)):
for L in lines:
if df_mins.loc[i,L] >= imports.loc[i,L]:
df_mins.loc[i,L] = imports.loc[i,L]
imports.loc[i,L] = 0
else:
imports.loc[i,L] = np.max((0,imports.loc[i,L]-df_mins.loc[i,L]))
dispatchable_imports = imports*24
dispatchable_imports.to_csv('Path_setup/PNW_dispatchable_imports.csv')
df_data = pd.read_csv('Path_setup/PNW_imports.csv',header=0)
# hourly minimum flow for paths
hourly = np.zeros((8760,len(lines)))
for i in range(0,365):
for L in lines:
index = lines.index(L)
hourly[i*24:i*24+24,index] = np.min((df_mins.loc[i,L], df_data.loc[i,L]))
H = | pd.DataFrame(hourly) | pandas.DataFrame |
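# Illustrative tweak (assumption, not in the original script): naming the
# columns after the paths keeps the hourly frame self-describing, e.g.
#     H = pd.DataFrame(hourly, columns=lines)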
import dateutil
import pandas as pd
"""
ๅคๆญ้ป่พ๏ผ้ๆกๅคๆญๆ ่ฎฐ
1.ๅไธๆก่ฎฐๅฝ๏ผๆพๅบๆถ้ด๏ผ้้ข๏ผๅคๆณจ
2.่ฅ้้ขไธบๆญฃ๏ผ็ญ้ๅบๅๅไธๅฐๆถๅ
้้ขไธบ่ด็่ฎฐๅฝ๏ผๅไน๏ผ็ธไผผ
3.ไป็ญ้ๅบ็่ฎฐๅฝ้ๆพๅบไธๅฝๅๅคๆณจไธๆ ท็่ฎฐๅฝ
4.่ฅๆๅ็ญ้ๅบ็df่ฎฐๅฝๆฐ>=1,่ฏฅๆกๆตๆฐดๆ ๆ
"""
def exclude_same_in_out(df):
df['nameOnOppositeCard'].fillna('', inplace=True)
df['remark'].fillna('', inplace=True)
"""
:param df: ็ฝ้ถๆตๆฐดdf
:param item_lst: ๆ้ค้กนๅ่กจ
:return: keep_df, drop_df
"""
"""
ๅๅไธๅฐๆถๅ
๏ผremarkๅๆ่
nameOnOppositeCardๅไธญ่ฟ้กน่ดฆๆทไธๅบ้กน่ดฆๆทไธ่ฝไธบๅไธไธปไฝใ
ๅจ่ฏฅๆถ้ดๅ
๏ผๅฆๆ่ฟ้กน่ดฆๆทไธๅบ้กน่ดฆๆทไธบๅไธไธปไฝ๏ผๅ่ฟ้กนๆตๆฐดไธ็ฎๅ
ฅๆๆๆตๆฐดใ
ๆณจๆ๏ผ
1 ่ฅremarkไธบ็ฉบ, ๅฏนremarkไธๅๅคๆญใ
2 ่ฅnameOnOppositeCardไธบ็ฉบ, ๅฏนnameOnOppositeCardไธๅๅคๆญใ
"""
df['transDate'] = pd.to_datetime(df['transDate'])
keep_row_lst = []
drop_row_lst = []
print('The current dataframe has %s rows in total' % df.shape[0])
for index, row in df.iterrows():
id = row['id']
is_valid = 1
name_on_oppsite = row['nameOnOppositeCard']
# evaluate each record individually
time = row['transDate']
amout_money = row['amountMoney']
remark = row['remark']
description = row['description']
if remark == "" and name_on_oppsite == "":
pass
else:
if amout_money > 0:
select_df = df[df['amountMoney'] < 0]
else:
select_df = df[df['amountMoney'] > 0]
# window: one hour before and after
start_time = time - dateutil.relativedelta.relativedelta(hours=1)
end_time = time + dateutil.relativedelta.relativedelta(hours=1)
#
tmp_df = select_df[(select_df['transDate'] >= start_time) & (select_df['transDate'] <= end_time)]
if tmp_df.shape[0] >= 1:
if remark != "":
tmp_df1 = tmp_df[tmp_df['remark'] == remark]
if tmp_df1.shape[0] >= 1:
is_valid = 0
if name_on_oppsite != "":
tmp_df2 = tmp_df[tmp_df['nameOnOppositeCard'] == name_on_oppsite]
if tmp_df2.shape[0] >= 1:
is_valid = 0
if is_valid == 0:
drop_row_lst.append(row)
else:
keep_row_lst.append(row)
keep_df = pd.DataFrame(keep_row_lst)
drop_df = | pd.DataFrame(drop_row_lst) | pandas.DataFrame |
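# Illustrative usage (assumption: the function goes on to return both frames):
#     statements = pd.read_csv('bank_statements.csv')  # hypothetical input file
#     keep_df, drop_df = exclude_same_in_out(statements)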
import os
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian
import pandas as pd
from pandas import DataFrame, HDFStore, Series, _testing as tm, read_hdf
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io import pytables as pytables
from pandas.io.pytables import ClosedFileError, PossibleDataLossError, Term
pytestmark = pytest.mark.single
def test_mode(setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
msg = r"[\S]* does not exist"
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
with HDFStore(path, mode=mode) as store:
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
msg = (
"mode w is not allowed while performing a read. "
r"Allowed modes are r, r\+ and a."
)
with pytest.raises(ValueError, match=msg):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
msg = (
r"Re-opening the file \[[\S]*\] with mode \[a\] will delete the "
"current file!"
)
# invalid mode change
with pytest.raises(PossibleDataLossError, match=msg):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(setup_path):
with tm.ensure_clean(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_complibs_default_settings(setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame({"A": "foo", "B": "bar"}, index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
tm.assert_series_equal(s_nan, retr)
def test_multiple_open_close(setup_path):
# gh-4409: open & close multiple times
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
# single
store = HDFStore(path)
assert "CLOSED" not in store.info()
assert store.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
with ensure_clean_path(setup_path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
msg = (
r"The file [\S]* is already opened\. Please close it before "
r"reopening in write mode\."
)
with pytest.raises(ValueError, match=msg):
HDFStore(path)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert "CLOSED" not in store1.info()
assert "CLOSED" not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert "CLOSED" in store1.info()
assert not store1.is_open
assert "CLOSED" not in store2.info()
assert store2.is_open
store2.close()
assert "CLOSED" in store1.info()
assert "CLOSED" in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store2.append("df2", df)
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
store = HDFStore(path)
store.close()
msg = r"[\S]* file is not open!"
with pytest.raises(ClosedFileError, match=msg):
store.keys()
with pytest.raises(ClosedFileError, match=msg):
"df" in store
with pytest.raises(ClosedFileError, match=msg):
len(store)
with pytest.raises(ClosedFileError, match=msg):
store["df"]
with pytest.raises(ClosedFileError, match=msg):
store.select("df")
with pytest.raises(ClosedFileError, match=msg):
store.get("df")
with pytest.raises(ClosedFileError, match=msg):
store.append("df2", df)
with pytest.raises(ClosedFileError, match=msg):
store.put("df3", df)
with pytest.raises(ClosedFileError, match=msg):
store.get_storer("df2")
with pytest.raises(ClosedFileError, match=msg):
store.remove("df2")
with pytest.raises(ClosedFileError, match=msg):
store.select("df")
msg = "'HDFStore' object has no attribute 'df'"
with pytest.raises(AttributeError, match=msg):
store.df
def test_fspath():
with tm.ensure_clean("foo.h5") as path:
with HDFStore(path) as store:
assert os.fspath(store) == str(path)
import errno
import json
import logging
import os
import shutil
import traceback
import uuid
import math
import pandas as pd
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from matplotlib import pyplot as plt
from plotly.offline import plot
from scipy import stats
from natsort import natsorted
from sklearn import metrics
from sklearn.linear_model import LinearRegression
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.kb_GenericsReportClient import kb_GenericsReport
from GenericsAPI.Utils.DataUtil import DataUtil
from installed_clients.KBaseReportClient import KBaseReport
CORR_METHOD = ['pearson', 'kendall', 'spearman', 'mutual_info'] # correlation method
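# note: 'mutual_info' is only handled explicitly in _compute_metrices_corr;
# pandas DataFrame.corr (used by df_to_corr) does not accept it as a method string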
HIDDEN_SEARCH_THRESHOLD = 1500
class CorrelationUtil:
def _mkdir_p(self, path):
"""
_mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _validate_compute_corr_matrix_params(self, params):
"""
_validate_compute_corr_matrix_params:
validates params passed to compute_correlation_matrix method
"""
logging.info('start validating compute_correlation_matrix params')
# check for required parameters
for p in ['input_obj_ref', 'workspace_name', 'corr_matrix_name']:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
def _validate_compute_correlation_across_matrices_params(self, params):
"""
_validate_compute_correlation_across_matrices_params:
validates params passed to compute_correlation_across_matrices method
"""
logging.info('start validating compute_correlation_across_matrices params')
# check for required parameters
for p in ['workspace_name', 'corr_matrix_name', 'matrix_ref_1', 'matrix_ref_2']:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
def _fetch_taxon(self, amplicon_set_ref, amplicon_ids):
logging.info('start fetching taxon info from AmpliconSet')
taxons = dict()
taxons_level = dict()
amplicon_set_data = self.dfu.get_objects(
{'object_refs': [amplicon_set_ref]})['data'][0]['data']
amplicons = amplicon_set_data.get('amplicons')
for amplicon_id in amplicon_ids:
scientific_name = 'None'
level = 'Unknown'
try:
scientific_name = amplicons.get(amplicon_id).get('taxonomy').get('scientific_name')
except Exception:
pass
try:
level = amplicons.get(amplicon_id).get('taxonomy').get('taxon_level')
except Exception:
pass
taxons.update({amplicon_id: scientific_name})
taxons_level.update({amplicon_id: level})
# default empty taxons and taxons_level
if set(taxons.values()) == {'None'}:
taxons = None
if set(taxons_level.values()) == {'Unknown'}:
taxons_level = None
return taxons, taxons_level
def _build_top_corr_table(self, matrix_2D, output_directory,
original_matrix_ref=[], sig_matrix_2D=None):
row_ids = matrix_2D.get('row_ids')
col_ids = matrix_2D.get('col_ids')
values = matrix_2D.get('values')
data_df = pd.DataFrame(values, index=row_ids, columns=col_ids)
data_df.fillna(0, inplace=True)
sig_df = None
if sig_matrix_2D is not None:
sig_df = pd.DataFrame(sig_matrix_2D.get('values'),
index=sig_matrix_2D.get('row_ids'),
columns=sig_matrix_2D.get('col_ids'))
sig_df.fillna(0, inplace=True)
columns = list()
if len(original_matrix_ref) == 1:
data_df = data_df.mask(np.tril(np.ones(data_df.shape)).astype(bool))  # mask the lower triangle (incl. diagonal) so each unordered pair appears once
res = self.dfu.get_objects({'object_refs': [original_matrix_ref[0]]})['data'][0]
obj_type = res['info'][2]
matrix_type = obj_type.split('-')[0].split('Matrix')[0].split('.')[-1]
columns.extend(['{} 1'.format(matrix_type), '{} 2'.format(matrix_type)])
elif len(original_matrix_ref) == 2:
for matrix_ref in original_matrix_ref:
res = self.dfu.get_objects({'object_refs': [matrix_ref]})['data'][0]
obj_type = res['info'][2]
matrix_type = obj_type.split('-')[0].split('Matrix')[0].split('.')[-1]
columns.append(matrix_type)
else:
columns = ['Variable 1', 'Variable 2']
value_col_name = 'Correlation Coefficient'
columns.append(value_col_name)
links = data_df.stack().reset_index()
# links = links[links.iloc[:, 0] != links.iloc[:, 1]] # remove self-comparison
links.columns = columns
# sort by absolute value
links = links.iloc[(-links[value_col_name].abs()).argsort()].reset_index(drop=True)
top_corr_limit = 200
top_corr = links[:top_corr_limit].copy()
top_corr_size = top_corr.index.size
if sig_df is not None:
sig_values = list()
for i in range(top_corr_size):
corr_pair = top_corr.iloc[i]
first_item = corr_pair[0]
second_item = corr_pair[1]
sig_value = sig_df.loc[first_item, second_item]
sig_values.append(sig_value)
top_corr['P-Value'] = sig_values
headerColor = 'grey'
rowEvenColor = 'lightgrey'
rowOddColor = 'white'
# color codes from px.colors.diverging.RdBu
colors = ['rgb(247,247,247)', 'rgb(253,219,199)', 'rgb(244,165,130)', 'rgb(214,96,77)',
'rgb(67,147,195)', 'rgb(146,197,222)', 'rgb(209,229,240)']
interval = 0.3
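# int(coefficient / 0.3) falls in -3..3; negative indices wrap around to the blue shades at the end of `colors`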
corr_color_idx = (top_corr[value_col_name]/interval).apply(int)  # divide coefficient by interval and truncate to int
fig = go.Figure(data=[go.Table(
header=dict(values=list(top_corr.columns),
line_color='darkslategray',
fill_color=headerColor,
align='left',
font=dict(color='white', size=12)),
cells=dict(values=top_corr.T.values,
line_color='darkslategray',
fill_color=[[rowOddColor, rowEvenColor]*top_corr_limit,
[rowOddColor, rowEvenColor]*top_corr_limit,
np.array(colors)[corr_color_idx],
[rowOddColor, rowEvenColor]*top_corr_limit],
align='left',
font=dict(color='darkslategray', size=11)))
])
if top_corr_size < top_corr_limit:
fig_title = 'All {} Coefficient Pairs'.format(top_corr_size)
else:
fig_title = 'Top {} Coefficient Pairs'.format(top_corr_limit)
fig.update_layout(
width=1200,
height=2000,
title=dict(text=fig_title, x=0.5,
font=dict(family='Times New Roman', size=30, color='Purple')))
table_file_name = 'top_corr_table.html'
table_file_path = os.path.join(output_directory, table_file_name)
fig.write_html(table_file_path)
tab_content = ''
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(table_file_name)
tab_content += 'style="border:none;"></iframe>'
return tab_content
def _build_top_scatter_plot(self, matrix_2D, output_directory, df1, df2,
original_matrix_ref=[], sig_matrix_2D=None):
row_ids = matrix_2D.get('row_ids')
col_ids = matrix_2D.get('col_ids')
values = matrix_2D.get('values')
data_df = pd.DataFrame(values, index=row_ids, columns=col_ids)
data_df.fillna(0, inplace=True)
sig_df = None
if sig_matrix_2D is not None:
sig_df = pd.DataFrame(sig_matrix_2D.get('values'),
index=sig_matrix_2D.get('row_ids'),
columns=sig_matrix_2D.get('col_ids'))
sig_df.fillna(0, inplace=True)
columns = list()
if len(original_matrix_ref) == 1:
data_df = data_df.mask(np.tril(np.ones(data_df.shape)).astype(bool))
res = self.dfu.get_objects({'object_refs': [original_matrix_ref[0]]})['data'][0]
obj_type = res['info'][2]
matrix_type = obj_type.split('-')[0].split('Matrix')[0].split('.')[-1]
columns.extend(['{} 1'.format(matrix_type), '{} 2'.format(matrix_type)])
elif len(original_matrix_ref) == 2:
for matrix_ref in original_matrix_ref:
res = self.dfu.get_objects({'object_refs': [matrix_ref]})['data'][0]
obj_type = res['info'][2]
matrix_type = obj_type.split('-')[0].split('Matrix')[0].split('.')[-1]
columns.append(matrix_type)
else:
columns = ['Variable 1', 'Variable 2']
value_col_name = 'Correlation Coefficient'
columns.append(value_col_name)
links = data_df.stack().reset_index()
# links = links[links.iloc[:, 0] != links.iloc[:, 1]] # remove self-comparison
links.columns = columns
# sort by absolute value
links = links.iloc[(-links[value_col_name].abs()).argsort()].reset_index(drop=True)
top_corr_limit = 20
top_corr = links[:top_corr_limit]
num_plots = top_corr.index.size
warnings = ''
if top_corr_limit < links.index.size:
warnings += 'Note: Limiting to top {} plots'.format(top_corr_limit)
num_cols = 3
num_rows = math.ceil(num_plots/num_cols)
fig = make_subplots(rows=num_rows, cols=num_cols)
colors = px.colors.qualitative.Plotly
colors_size = len(colors)
for i in range(num_plots):
corr_pair = top_corr.iloc[i]
first_item = corr_pair[0]
second_item = corr_pair[1]
corr_r = corr_pair[2]
first_item_matrix_value = list(df1.loc[first_item].values)
second_item_matrix_value = list(df2.loc[second_item].values)
sub_fig = go.Scatter(
x=first_item_matrix_value,
y=second_item_matrix_value,
mode='markers',
showlegend=False,
opacity=0.65,
marker_color=colors[i % colors_size],
marker=dict(size=8,)
)
X = df1.loc[first_item].values.reshape(-1, 1)
model = LinearRegression()
model.fit(X, df2.loc[second_item].values)
x_range = np.linspace(X.min(), X.max(), 100)
y_range = model.predict(x_range.reshape(-1, 1))
sub_fig_trend = go.Scatter(
x=x_range,
y=y_range,
showlegend=False,
marker_color=colors[i % colors_size],
)
fig.add_trace(sub_fig, row=i//num_cols + 1, col=i % num_cols + 1)
fig.add_trace(sub_fig_trend, row=i//num_cols + 1, col=i % num_cols + 1)
anno_text = 'correlation coefficient={}'.format(corr_r)
if i == 0:
fig.update_layout({'xaxis': {'title': '{} ({})'.format(first_item[:10], links.columns[0])}})
fig.update_layout({'yaxis': {'title': '{} ({})'.format(second_item[:10], links.columns[1])}})
x_start = fig['layout']['xaxis']['domain'][0]
x_end = fig['layout']['xaxis']['domain'][1]
fig.add_annotation(
text=anno_text,
x=(x_end - x_start) / 2 + x_start,
y=fig['layout']['yaxis']['domain'][1],
xref='paper',
yref='paper',
xanchor='center',
yanchor='top',
showarrow=False
)
if sig_df is not None:
sig_value = sig_df.loc[first_item, second_item]
anno_text = 'p-value={}'.format(sig_value)
fig.add_annotation(
text=anno_text,
x=(x_end - x_start) / 2 + x_start,
y=fig['layout']['yaxis']['domain'][1] - 0.007,
xref='paper',
yref='paper',
xanchor='center',
yanchor='top',
showarrow=False
)
else:
fig.update_layout({'xaxis{}'.format(i+1): {'title': '{} ({})'.format(first_item[:10], links.columns[0])}})
fig.update_layout({'yaxis{}'.format(i+1): {'title': '{} ({})'.format(second_item[:10], links.columns[1])}})
x_start = fig['layout']['xaxis{}'.format(i+1)]['domain'][0]
x_end = fig['layout']['xaxis{}'.format(i+1)]['domain'][1]
fig.add_annotation(
text=anno_text,
x=(x_end - x_start) / 2 + x_start,
y=fig['layout']['yaxis{}'.format(i+1)]['domain'][1],
xref='paper',
yref='paper',
xanchor='center',
yanchor='top',
showarrow=False
)
if sig_df is not None:
sig_value = sig_df.loc[first_item, second_item]
anno_text = 'p-value={}'.format(sig_value)
fig.add_annotation(
text=anno_text,
x=(x_end - x_start) / 2 + x_start,
y=fig['layout']['yaxis{}'.format(i+1)]['domain'][1] - 0.007,
xref='paper',
yref='paper',
xanchor='center',
yanchor='top',
showarrow=False
)
fig_title = 'Scatter Plot For Top {} Coefficient Pairs'.format(num_plots)
fig.update_layout(
width=1200,
height=2000,
title=dict(text=fig_title, x=0.5,
font=dict(family='Times New Roman', size=30, color='Purple')),
font=dict(family="Courier New, monospace", size=10, color="RebeccaPurple"))
plot_file_name = 'top_scatter_plot.html'
plot_file_path = os.path.join(output_directory, plot_file_name)
fig.write_html(plot_file_path)
tab_content = ''
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(plot_file_name)
tab_content += 'style="border:none;"></iframe>'
return tab_content
def _build_heatmap_content(self, matrix_2D, output_directory, centered_by=None):
row_ids = matrix_2D.get('row_ids')
col_ids = matrix_2D.get('col_ids')
values = matrix_2D.get('values')
data_df = pd.DataFrame(values, index=row_ids, columns=col_ids)
data_df.fillna(0, inplace=True)
tsv_file_path = os.path.join(output_directory, 'heatmap_data_{}.tsv'.format(
str(uuid.uuid4())))
data_df.to_csv(tsv_file_path)
heatmap_dir = self.report_util.build_heatmap_html({
'tsv_file_path': tsv_file_path,
'cluster_data': True,
'centered_by': centered_by})['html_dir']
heatmap_report_files = os.listdir(heatmap_dir)
heatmap_index_page = None
for heatmap_report_file in heatmap_report_files:
if heatmap_report_file.endswith('.html'):
heatmap_index_page = heatmap_report_file
shutil.copy2(os.path.join(heatmap_dir, heatmap_report_file),
output_directory)
tab_content = '\n'
if heatmap_index_page:
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(heatmap_index_page)
tab_content += 'style="border:none;"></iframe>'
else:
tab_content += '''\n<p style="color:red;" >'''
tab_content += '''Heatmap is too large to be displayed.</p>\n'''
return tab_content
def _build_table_content(self, matrix_2D, output_directory, original_matrix_ref=[],
type='corr'):
"""
_build_table_content: generate HTML table content for FloatMatrix2D object
"""
page_content = """\n"""
table_file_name = '{}_table.html'.format(type)
data_file_name = '{}_data.json'.format(type)
page_content += """<iframe height="1300px" width="100%" """
page_content += """src="{}" """.format(table_file_name)
page_content += """style="border:none;"></iframe>\n"""
row_ids = matrix_2D.get('row_ids')
col_ids = matrix_2D.get('col_ids')
values = matrix_2D.get('values')
df = pd.DataFrame(values, index=row_ids, columns=col_ids)
columns = list()
taxons = None
taxons_level = None
if len(original_matrix_ref) == 1:
df = df.mask(np.tril(np.ones(df.shape)).astype(bool))
res = self.dfu.get_objects({'object_refs': [original_matrix_ref[0]]})['data'][0]
obj_type = res['info'][2]
matrix_type = obj_type.split('-')[0].split('Matrix')[0].split('.')[-1]
# if matrix_type == 'Amplicon':
# amplicon_set_ref = res['data'].get('amplicon_set_ref')
# if amplicon_set_ref:
# taxons, taxons_level = self._fetch_taxon(amplicon_set_ref, col_ids)
columns.extend(['{} 1'.format(matrix_type), '{} 2'.format(matrix_type)])
elif len(original_matrix_ref) == 2:
for matrix_ref in original_matrix_ref[::-1]:
res = self.dfu.get_objects({'object_refs': [matrix_ref]})['data'][0]
obj_type = res['info'][2]
matrix_type = obj_type.split('-')[0].split('Matrix')[0].split('.')[-1]
# if matrix_type == 'Amplicon':
# amplicon_set_ref = res['data'].get('amplicon_set_ref')
# if amplicon_set_ref:
# taxons, taxons_level = self._fetch_taxon(amplicon_set_ref, col_ids)
columns.append(matrix_type)
else:
columns = ['Variable 1', 'Variable 2']
links = df.stack().reset_index()
# remove self-comparison
links = links[links.iloc[:, 0] != links.iloc[:, 1]]
if type == 'corr':
columns.append('Correlation')
elif type == 'sig':
columns.append('Significance')
else:
columns.append('Value')
links.columns = columns
if taxons:
links['Taxon'] = links.iloc[:, 0].map(taxons)
if taxons_level:
links['Taxon Level'] = links.iloc[:, 0].map(taxons_level)
table_headers = links.columns.tolist()
table_content = """\n"""
# build header and footer
table_content += """\n<thead>\n<tr>\n"""
for table_header in table_headers:
table_content += """\n <th>{}</th>\n""".format(table_header)
table_content += """\n</tr>\n</thead>\n"""
table_content += """\n<tfoot>\n<tr>\n"""
for table_header in table_headers:
table_content += """\n <th>{}</th>\n""".format(table_header)
table_content += """\n</tr>\n</tfoot>\n"""
logging.info('start generating table json file')
data_array = links.values.tolist()
total_rec = len(data_array)
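# json payload follows the DataTables server-side response fields (draw, recordsTotal, recordsFiltered, data)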
json_dict = {'draw': 1,
'recordsTotal': total_rec,
'recordsFiltered': total_rec,
'data': data_array}
with open(os.path.join(output_directory, data_file_name), 'w') as fp:
json.dump(json_dict, fp)
logging.info('start generating table html')
with open(os.path.join(output_directory, table_file_name), 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'table_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>table_header</p>',
table_content)
report_template = report_template.replace('ajax_file_path',
data_file_name)
report_template = report_template.replace('deferLoading_size',
str(total_rec))
result_file.write(report_template)
return page_content
def _generate_visualization_content(self, output_directory, corr_matrix_obj_ref,
corr_matrix_plot_path, scatter_plot_path, df1, df2):
"""
<div class="tab">
<button class="tablinks" onclick="openTab(event, 'CorrelationMatrix')" id="defaultOpen">Correlation Matrix</button>
</div>
<div id="CorrelationMatrix" class="tabcontent">
<p>CorrelationMatrix_Content</p>
</div>"""
tab_def_content = ''
tab_content = ''
corr_data = self.dfu.get_objects({'object_refs': [corr_matrix_obj_ref]})['data'][0]['data']
coefficient_data = corr_data.get('coefficient_data')
significance_data = corr_data.get('significance_data')
original_matrix_ref = corr_data.get('original_matrix_ref')
tab_def_content += """
<div class="tab">
<button class="tablinks" onclick="openTab(event, 'CorrelationTable')" id="defaultOpen">Coefficient Table</button>
"""
corr_table_content = self._build_top_corr_table(coefficient_data, output_directory,
original_matrix_ref=original_matrix_ref,
sig_matrix_2D=significance_data)
tab_content += """
<div id="CorrelationTable" class="tabcontent">{}</div>""".format(corr_table_content)
tab_def_content += """
<button class="tablinks" onclick="openTab(event, 'ScatterTopMatrix')">Plot Top Coefficient Pairs</button>
"""
scatter_top_content = self._build_top_scatter_plot(coefficient_data, output_directory,
df1, df2,
original_matrix_ref=original_matrix_ref,
sig_matrix_2D=significance_data)
tab_content += """
<div id="ScatterTopMatrix" class="tabcontent">{}</div>""".format(scatter_top_content)
tab_def_content += """
<div class="tab">
<button class="tablinks" onclick="openTab(event, 'CorrelationMatrix')">Correlation Matrix Heatmap</button>
"""
corr_heatmap_content = self._build_heatmap_content(coefficient_data, output_directory,
centered_by=0)
tab_content += """
<div id="CorrelationMatrix" class="tabcontent">{}</div>""".format(corr_heatmap_content)
if significance_data:
tab_def_content += """
<button class="tablinks" onclick="openTab(event, 'SignificanceMatrix')">Significance Matrix Heatmap</button>
"""
sig_heatmap_content = self._build_heatmap_content(significance_data, output_directory)
tab_content += """
<div id="SignificanceMatrix" class="tabcontent">{}</div>""".format(sig_heatmap_content)
if corr_matrix_plot_path:
tab_def_content += """
<button class="tablinks" onclick="openTab(event, 'CorrelationMatrixPlot')">Correlation Matrix Plot</button>
"""
tab_content += """
<div id="CorrelationMatrixPlot" class="tabcontent">
"""
if corr_matrix_plot_path.endswith('.png'):
corr_matrix_plot_name = 'CorrelationMatrixPlot.png'
corr_matrix_plot_display_name = 'Correlation Matrix Plot'
shutil.copy2(corr_matrix_plot_path,
os.path.join(output_directory, corr_matrix_plot_name))
tab_content += '<div class="gallery">'
tab_content += '<a target="_blank" href="{}">'.format(corr_matrix_plot_name)
tab_content += '<img src="{}" '.format(corr_matrix_plot_name)
tab_content += 'alt="{}" width="600" height="400">'.format(
corr_matrix_plot_display_name)
tab_content += '</a><div class="desc">{}</div></div>'.format(
corr_matrix_plot_display_name)
elif corr_matrix_plot_path.endswith('.html'):
corr_matrix_plot_name = 'CorrelationMatrixPlot.html'
shutil.copy2(corr_matrix_plot_path,
os.path.join(output_directory, corr_matrix_plot_name))
tab_content += '<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(corr_matrix_plot_name)
tab_content += 'style="border:none;"></iframe>\n<p></p>\n'
else:
raise ValueError('unexpected correlation matrix plot format:\n{}'.format(
corr_matrix_plot_path))
tab_content += """</div>"""
if scatter_plot_path:
tab_def_content += """
<button class="tablinks" onclick="openTab(event, 'ScatterMatrixPlot')">Scatter Matrix Plot</button>
"""
tab_content += """
<div id="ScatterMatrixPlot" class="tabcontent">
"""
scatter_plot_name = 'ScatterMatrixPlot.png'
scatter_plot_display_name = 'Scatter Matrix Plot'
shutil.copy2(scatter_plot_path,
os.path.join(output_directory, scatter_plot_name))
tab_content += '<div class="gallery">'
tab_content += '<a target="_blank" href="{}">'.format(scatter_plot_name)
tab_content += '<img src="{}" '.format(scatter_plot_name)
tab_content += 'alt="{}" width="600" height="400">'.format(
scatter_plot_display_name)
tab_content += '</a><div class="desc">{}</div></div>'.format(
scatter_plot_display_name)
tab_content += """</div>"""
tab_def_content += """</div>"""
return tab_def_content + tab_content
def _generate_corr_html_report(self, corr_matrix_obj_ref, corr_matrix_plot_path,
scatter_plot_path, df1, df2):
"""
_generate_corr_html_report: generate html summary report for correlation
"""
logging.info('Start generating html report')
html_report = list()
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'compute_correlation_report.html')
visualization_content = self._generate_visualization_content(
output_directory,
corr_matrix_obj_ref,
corr_matrix_plot_path,
scatter_plot_path, df1, df2)
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'corr_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Compute Correlation App'
})
return html_report
def _generate_corr_report(self, corr_matrix_obj_ref, workspace_name, corr_matrix_plot_path,
scatter_plot_path=None, df1=None, df2=None):
"""
_generate_report: generate summary report
"""
logging.info('Start creating report')
output_html_files = self._generate_corr_html_report(corr_matrix_obj_ref,
corr_matrix_plot_path,
scatter_plot_path, df1, df2)
report_params = {'message': '',
'objects_created': [{'ref': corr_matrix_obj_ref,
'description': 'Correlation Matrix'}],
'workspace_name': workspace_name,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 1400,
'report_object_name': 'compute_correlation_matrix_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _corr_for_matrix(self, input_obj_ref, method, dimension):
"""
_corr_for_matrix: compute correlation matrix df for KBaseMatrices object
"""
data_matrix = self.data_util.fetch_data({'obj_ref': input_obj_ref}).get('data_matrix')
data_df = pd.read_json(data_matrix)
data_df = data_df.reindex(index=natsorted(data_df.index))
data_df = data_df.reindex(columns=natsorted(data_df.columns))
data_df.fillna(0, inplace=True)
corr_df = self.df_to_corr(data_df, method=method, dimension=dimension)
return corr_df, data_df
def _compute_significance(self, data_df, dimension):
"""
_compute_significance: compute pairwise significance dataframe
two-sided p-value for a hypothesis test
"""
logging.info('Start computing significance matrix')
if dimension == 'row':
data_df = data_df.T
data_df = data_df.dropna()._get_numeric_data()
dfcols = pd.DataFrame(columns=data_df.columns)
sig_df = dfcols.transpose().join(dfcols, how='outer')
for r in data_df.columns:
for c in data_df.columns:
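# scipy.stats.linregress returns (slope, intercept, rvalue, pvalue, stderr); index [3] is the two-sided p-value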
pvalue = stats.linregress(data_df[r], data_df[c])[3]
sig_df.at[c, r] = round(pvalue, 4)
return sig_df
def _df_to_list(self, df, threshold=None):
"""
_df_to_list: convert DataFrame to FloatMatrix2D matrix data
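returns, for example: {'row_ids': ['r1'], 'col_ids': ['c1', 'c2'], 'values': [[0.1, 0.2]]}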
"""
df.fillna(0, inplace=True)
if threshold:
drop_cols = list()
for col in df.columns:
if all(df[col] < threshold) and all(df[col] > -threshold):
drop_cols.append(col)
df.drop(columns=drop_cols, inplace=True, errors='ignore')
drop_idx = list()
for idx in df.index:
if all(df.loc[idx] < threshold) and all(df.loc[idx] > -threshold):
drop_idx.append(idx)
df.drop(index=drop_idx, inplace=True, errors='ignore')
matrix_data = {'row_ids': df.index.astype(str).tolist(),
'col_ids': df.columns.astype(str).tolist(),
'values': df.values.tolist()}
return matrix_data
def _save_corr_matrix(self, workspace_name, corr_matrix_name, corr_df, sig_df, method,
matrix_ref=None, corr_threshold=None):
"""
_save_corr_matrix: save KBaseExperiments.CorrelationMatrix object
"""
logging.info('Start saving CorrelationMatrix')
if not isinstance(workspace_name, int):
ws_name_id = self.dfu.ws_name_to_id(workspace_name)
else:
ws_name_id = workspace_name
corr_data = {}
corr_data.update({'coefficient_data': self._df_to_list(corr_df,
threshold=corr_threshold)})
corr_data.update({'correlation_parameters': {'method': method}})
if matrix_ref:
corr_data.update({'original_matrix_ref': matrix_ref})
if sig_df is not None:
corr_data.update({'significance_data': self._df_to_list(sig_df)})
obj_type = 'KBaseExperiments.CorrelationMatrix'
info = self.dfu.save_objects({
"id": ws_name_id,
"objects": [{
"type": obj_type,
"data": corr_data,
"name": corr_matrix_name
}]
})[0]
return "%s/%s/%s" % (info[6], info[0], info[4])
def _Matrix2D_to_df(self, Matrix2D):
"""
_Matrix2D_to_df: transform a FloatMatrix2D to data frame
"""
index = Matrix2D.get('row_ids')
columns = Matrix2D.get('col_ids')
values = Matrix2D.get('values')
df = pd.DataFrame(values, index=index, columns=columns)
return df
def _corr_to_df(self, corr_matrix_ref):
"""
retrieve correlation matrix ws object to coefficient_df and significance_df
"""
corr_data = self.dfu.get_objects({'object_refs': [corr_matrix_ref]})['data'][0]['data']
coefficient_data = corr_data.get('coefficient_data')
significance_data = corr_data.get('significance_data')
coefficient_df = self._Matrix2D_to_df(coefficient_data)
significance_df = None
if significance_data:
significance_df = self._Matrix2D_to_df(significance_data)
return coefficient_df, significance_df
def _corr_df_to_excel(self, coefficient_df, significance_df, result_dir, corr_matrix_ref):
"""
write correlation matrix dfs into excel
"""
corr_info = self.dfu.get_objects({'object_refs': [corr_matrix_ref]})['data'][0]['info']
corr_name = corr_info[1]
file_path = os.path.join(result_dir, corr_name + ".xlsx")
writer = pd.ExcelWriter(file_path)
coefficient_df.to_excel(writer, "coefficient_data", index=True)
if significance_df is not None:
significance_df.to_excel(writer, "significance_data", index=True)
writer.close()
def _update_taxonomy_index(self, data_df, amplicon_set_ref):
logging.info('start updating index with taxonomy info from AmpliconSet')
amplicon_set_data = self.dfu.get_objects(
{'object_refs': [amplicon_set_ref]})['data'][0]['data']
amplicons = amplicon_set_data.get('amplicons')
index = data_df.index.values
replace_index = list()
for idx in index:
scientific_name = None
try:
scientific_name = amplicons.get(idx).get('taxonomy').get('scientific_name')
except Exception:
pass
if scientific_name:
replace_index.append(scientific_name + '_' + idx)
else:
replace_index.append(idx)
for idx, val in enumerate(replace_index):
index[idx] = val
return data_df
def _fetch_matrix_data(self, matrix_ref):
logging.info('start fetching matrix data')
res = self.dfu.get_objects({'object_refs': [matrix_ref]})['data'][0]
obj_type = res['info'][2]
if "KBaseMatrices" in obj_type or 'KBaseProfile' in obj_type:
data_matrix = self.data_util.fetch_data({'obj_ref': matrix_ref}).get('data_matrix')
data_df = pd.read_json(data_matrix)
data_df = data_df.reindex(index=natsorted(data_df.index))
data_df = data_df.reindex(columns=natsorted(data_df.columns))
return data_df
else:
err_msg = 'Ooops! [{}] is not supported.\n'.format(obj_type)
err_msg += 'Please supply KBaseMatrices or KBaseProfile object'
raise ValueError(err_msg)
def _compute_metrices_corr(self, df1, df2, method, compute_significance):
df1.fillna(0, inplace=True)
df2.fillna(0, inplace=True)
col_1 = df1.columns
col_2 = df2.columns
idx_1 = df1.index
idx_2 = df2.index
common_col = col_1.intersection(col_2)
logging.info('matrices share [{}] common columns'.format(common_col.size))
if common_col.empty:
raise ValueError('Matrices share no common columns')
logging.info('start trimming original matrix')
df1 = df1.loc[:, common_col]
df2 = df2.loc[:, common_col]
corr_df = pd.DataFrame(index=idx_1, columns=idx_2)
sig_df = pd.DataFrame(index=idx_1, columns=idx_2)
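# result frames are indexed by df1 rows (rows) x df2 rows (columns) and filled cell-by-cell below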
logging.info('start calculating correlation matrix')
logging.info('sizing {} x {}'.format(idx_1.size, idx_2.size))
counter = 0
for idx_value in idx_1:
for col_value in idx_2:
if counter % 100000 == 0:
logging.info('computed {} corr/sig values'.format(counter))
value_array_1 = df1.loc[idx_value].tolist()
value_array_2 = df2.loc[col_value].tolist()
if method == 'pearson':
corr_value, p_value = stats.pearsonr(value_array_1, value_array_2)
elif method == 'spearman':
corr_value, p_value = stats.spearmanr(value_array_1, value_array_2)
elif method == 'kendall':
corr_value, p_value = stats.kendalltau(value_array_1, value_array_2)
elif method == 'mutual_info':
corr_value = metrics.adjusted_mutual_info_score(value_array_1, value_array_2)
# p_value = stats.linregress(value_array_1, value_array_2)[3]
p_value = 0
else:
err_msg = 'Input correlation method [{}] is not available.\n'.format(method)
err_msg += 'Please choose one of {}'.format(CORR_METHOD)
raise ValueError(err_msg)
corr_df.at[idx_value, col_value] = round(corr_value, 4)
if compute_significance:
sig_df.at[idx_value, col_value] = round(p_value, 4)
counter += 1
if not compute_significance:
sig_df = None
return corr_df, sig_df, df1, df2
def __init__(self, config):
self.ws_url = config["workspace-url"]
self.callback_url = config['SDK_CALLBACK_URL']
self.token = config['KB_AUTH_TOKEN']
self.scratch = config['scratch']
self.data_util = DataUtil(config)
self.dfu = DataFileUtil(self.callback_url)
self.report_util = kb_GenericsReport(self.callback_url)
plt.switch_backend('agg')
def df_to_corr(self, df, method='pearson', dimension='col'):
"""
Compute pairwise correlation of dimension (col or row)
method: one of ['pearson', 'kendall', 'spearman']
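Illustrative example (assumes ``util`` is an initialized CorrelationUtil):
>>> small_df = pd.DataFrame({'s1': [1, 2, 3], 's2': [2, 4, 6]})
>>> util.df_to_corr(small_df, method='spearman')  # 2x2 coefficient matrix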
"""
logging.info('Computing correlation matrix')
if method not in CORR_METHOD:
err_msg = 'Input correlation method [{}] is not available.\n'.format(method)
err_msg += 'Please choose one of {}'.format(CORR_METHOD)
raise ValueError(err_msg)
if dimension == 'row':
df = df.T
elif dimension != 'col':
err_msg = 'Input dimension [{}] is not available.\n'.format(dimension)
err_msg += 'Please choose either "col" or "row"'
raise ValueError(err_msg)
corr_df = df.corr(method=method).round(4)
return corr_df
def plotly_corr_matrix(self, corr_df):
logging.info('Plotting matrix of correlation')
result_dir = os.path.join(self.scratch, str(uuid.uuid4()) + '_corr_matrix_plots')
self._mkdir_p(result_dir)
try:
trace = go.Heatmap(z=corr_df.values,
x=corr_df.columns,
y=corr_df.index)
data = [trace]
except Exception:
err_msg = 'Running plotly_corr_matrix returned an error:\n{}\n'.format(
traceback.format_exc())
raise ValueError(err_msg)
else:
corr_matrix_plot_path = os.path.join(result_dir, 'corr_matrix_plots.html')
logging.info('Saving plot to:\n{}'.format(corr_matrix_plot_path))
plot(data, filename=corr_matrix_plot_path)
return corr_matrix_plot_path
def plot_corr_matrix(self, corr_df):
"""
plot_corr_matrix: generate correlation matrix plot
"""
logging.info('Plotting matrix of correlation')
result_dir = os.path.join(self.scratch, str(uuid.uuid4()) + '_corr_matrix_plots')
self._mkdir_p(result_dir)
try:
plt.clf()
matrix_size = corr_df.index.size
figsize = 10 if matrix_size / 5 < 10 else matrix_size / 5
fig, ax = plt.subplots(figsize=(figsize, figsize))
cax = ax.matshow(corr_df)
plt.xticks(list(range(len(corr_df.columns))), corr_df.columns, rotation='vertical',
fontstyle='italic')
plt.yticks(list(range(len(corr_df.columns))), corr_df.columns, fontstyle='italic')
plt.colorbar(cax)
except Exception:
err_msg = 'Running plot_corr_matrix returned an error:\n{}\n'.format(
traceback.format_exc())
raise ValueError(err_msg)
else:
corr_matrix_plot_path = os.path.join(result_dir, 'corr_matrix_plots.png')
logging.info('Saving plot to:\n{}'.format(corr_matrix_plot_path))
plt.savefig(corr_matrix_plot_path)
return corr_matrix_plot_path
def plot_scatter_matrix(self, df, dimension='col', alpha=0.2, diagonal='kde', figsize=(10, 10)):
"""
plot_scatter_matrix: generate scatter plot for dimension (col or row)
ref: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.plotting.scatter_matrix.html
"""
logging.info('Plotting matrix of scatter')
result_dir = os.path.join(self.scratch, str(uuid.uuid4()) + '_scatter_plots')
self._mkdir_p(result_dir)
if dimension == 'row':
df = df.T
elif dimension != 'col':
err_msg = 'Input dimension [{}] is not available.\n'.format(dimension)
err_msg += 'Please choose either "col" or "row"'
raise ValueError(err_msg)
try:
plt.clf()
sm = pd.plotting.scatter_matrix(df, alpha=alpha, diagonal=diagonal, figsize=figsize)
#!/usr/bin/env python
# Copyright (c) <NAME>.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pandas as pd
import os
with open("data/pwkp.test.orig") as f:
original_content = f.readlines()
original_content = [t.strip() for t in original_content]
# original_content_low = [sent.lower() for sent in original_content]
# PWKP
dataframe = pd.read_excel("data/pwkp_data.ods", engine="odf",header=[0, 1],)
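# header=[0, 1] reads the two header rows into MultiIndex columns, e.g. ("System", "Unnamed: 0_level_1")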
#print(dataframe.columns)
current_system = ""
for i,row in dataframe.iterrows():
system = row["System", "Unnamed: 0_level_1"]
if type(system) == str:
current_system = system.strip().split(" (")[0]
dataframe.loc[i, ("System", "Unnamed: 0_level_1")] = current_system
n = 0
m = 0
dataframe["original"] = ""
dataframe["simplification"] = ""
systems = sorted(list(set(dataframe["System", "Unnamed: 0_level_1"])))
for system in systems:
if os.path.exists("data/"+system+".tok"):
with open("data/"+system+".tok") as f:
content = f.readlines()
current_original_content = original_content
elif os.path.exists("data/"+system+".tok.low"):
with open("data/"+system+".tok.low") as f:
content = f.readlines()
current_original_content = original_content # todo: _low add lowered sentences
else:
current_original_content = None
print("no data found for system", system)
continue
content = [t.strip() for t in content if t != "\n"]
if len(content) != len(dataframe[dataframe["System", "Unnamed: 0_level_1"] == system]):
print(system, len(content),len(dataframe[dataframe["System", "Unnamed: 0_level_1"] == system]))
for i, index in enumerate(dataframe[dataframe["System", "Unnamed: 0_level_1"] == system].index):
dataframe.loc[index, "simplification"] = content[i].strip()
dataframe.loc[index, "original"] = current_original_content[i].strip()
dataframe.to_csv("data/pwkp_with_text.csv")
def invert_rating(rating):
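# flip a 1-3 rating scale: 3 -> 1, 1 -> 3, 2 stays 2; anything else -> None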
if rating == 3:
return 1
elif rating == 2:
return 2
elif rating == 1:
return 3
else:
return None
dataframe = pd.read_csv("data/pwkp_with_text.csv", header=[0, 1])
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
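# lower the index engine size cutoff so this small index exercises the code path normally used for very large indexes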
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
        self.assertTrue((~result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
        self.assertTrue((~result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
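# Helper: build a Series of random values indexed by date_range(start, end, freq).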
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index= | date_range('1/1/2000', periods=10) | pandas.date_range |
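# --- Editor's illustrative sketch (not part of the original test module) ---
# A minimal, standalone demonstration of the dayfirst parsing behaviour the
# tests above assert: with dayfirst=True, "10/02/2014" is read as 10 February.
import pandas as pd

parsed = pd.to_datetime(["10/02/2014", "11/02/2014"], dayfirst=True)
assert parsed[0] == pd.Timestamp(2014, 2, 10)
assert parsed[1] == pd.Timestamp(2014, 2, 11)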
import configparser
import json
import os
import time
from datetime import datetime, timedelta
import finnhub
import pandas as pd
from kafka import KafkaProducer
import numpy as np
# Finnhub API config
AUTH_TOKEN = os.environ.get("FINNHUB_AUTH_TOKEN")
if AUTH_TOKEN is None:
    config = configparser.ConfigParser()
    config.read("foobar/data_loader/conf/finnhub.cfg")
    api_credential = config["api_credential"]
    # Fall back to the config file so AUTH_TOKEN is actually set; the
    # "auth_token" key name is an assumption about the cfg layout.
    AUTH_TOKEN = api_credential.get("auth_token")
SLEEP_TIME = int(os.environ.get("SLEEP_TIME", 300))
# Kafka producer
KAFKA_BROKER_URL = (
os.environ.get("KAFKA_BROKER_URL")
if os.environ.get("KAFKA_BROKER_URL")
else "localhost:9092"
)
TOPIC_NAME = (
os.environ.get("TOPIC_NAME") if os.environ.get("TOPIC_NAME") else "from_finnhub"
)
class finnhub_producer:
def __init__(self, api_token):
self.last_poll_datetime = datetime.utcnow() - timedelta(minutes=500)
self.api_client = finnhub.Client(api_key=api_token)
self.producer = KafkaProducer(
bootstrap_servers=KAFKA_BROKER_URL,
value_serializer=lambda x: x.encode("utf8"),
api_version=(0, 11, 5),
)
def query_stock_candles(self, symbol, date_from, date_to):
from_ts = int(datetime.timestamp(date_from))
to_ts = int(datetime.timestamp(date_to))
out = self.api_client.stock_candles(
symbol=symbol, resolution="5", _from=from_ts, to=to_ts
)
if out["s"] == "no_data":
print("no data")
return
else:
df = pd.DataFrame(out)
df = df.rename(
columns={
"c": "close_price",
"o": "open_price",
"h": "high_price",
"l": "low_price",
"v": "volume",
"t": "hour",
"s": "status",
}
)
stock_candle_timeseries = df.reset_index()#.set_index("timestamp")
return stock_candle_timeseries
def run(self):
date_from = self.last_poll_datetime
date_to = datetime.utcnow()
print(f'Getting stock price from {date_from} to {date_to}')
ts = self.query_stock_candles(
symbol="GME", date_from=date_from, date_to=date_to
)
if ts is not None:
print('Sending financial data to Kafka queue...')
ts['hour'] = | pd.to_datetime(ts['hour'], unit="s") | pandas.to_datetime |
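# --- Editor's illustrative sketch (not part of the original producer) ---
# Shows the renaming step applied to a Finnhub candle payload: the API returns
# single-letter keys (c/o/h/l/v/t/s), which query_stock_candles maps to
# descriptive column names. The sample numbers below are made up.
import pandas as pd

sample_candles = {
    "c": [187.2, 188.0],            # close
    "o": [186.5, 187.3],            # open
    "h": [188.1, 188.4],            # high
    "l": [186.0, 187.0],            # low
    "v": [1200, 950],               # volume
    "t": [1612966800, 1612967100],  # unix seconds
    "s": "ok",                      # status (broadcast to every row)
}
candles = pd.DataFrame(sample_candles).rename(
    columns={"c": "close_price", "o": "open_price", "h": "high_price",
             "l": "low_price", "v": "volume", "t": "hour", "s": "status"}
)
candles["hour"] = pd.to_datetime(candles["hour"], unit="s")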
"""
Module contains tools for collecting data from various remote sources
"""
import warnings
import tempfile
import datetime as dt
import time
from collections import defaultdict
import numpy as np
from pandas.compat import(
StringIO, bytes_to_str, range, lmap, zip
)
import pandas.compat as compat
from pandas import Panel, DataFrame, Series, read_csv, concat, to_datetime, DatetimeIndex, DateOffset
from pandas.core.common import is_list_like, PandasError
from pandas.io.common import urlopen, ZipFile, urlencode
from pandas.tseries.offsets import MonthEnd
from pandas.util.testing import _network_error_classes
from pandas.io.html import read_html
warnings.warn("\n"
"The pandas.io.data module is moved to a separate package "
"(pandas-datareader) and will be removed from pandas in a "
"future version.\nAfter installing the pandas-datareader package "
"(https://github.com/pydata/pandas-datareader), you can change "
"the import ``from pandas.io import data, wb`` to "
"``from pandas_datareader import data, wb``.",
FutureWarning)
class SymbolWarning(UserWarning):
pass
class RemoteDataError(PandasError, IOError):
pass
def DataReader(name, data_source=None, start=None, end=None,
retry_count=3, pause=0.001):
"""
Imports data from a number of online sources.
Currently supports Yahoo! Finance, Google Finance, St. Louis FED (FRED)
and Kenneth French's data library.
Parameters
----------
name : str or list of strs
the name of the dataset. Some data sources (yahoo, google, fred) will
accept a list of names.
data_source: str, default: None
the data source ("yahoo", "google", "fred", or "ff")
start : datetime, default: None
left boundary for range (defaults to 1/1/2010)
end : datetime, default: None
right boundary for range (defaults to today)
retry_count : int, default 3
Number of times to retry query request.
pause : numeric, default 0.001
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
Examples
----------
# Data from Yahoo! Finance
gs = DataReader("GS", "yahoo")
# Data from Google Finance
aapl = DataReader("AAPL", "google")
# Data from FRED
vix = DataReader("VIXCLS", "fred")
# Data from Fama/French
ff = DataReader("F-F_Research_Data_Factors", "famafrench")
ff = DataReader("F-F_Research_Data_Factors_weekly", "famafrench")
ff = DataReader("6_Portfolios_2x3", "famafrench")
ff = DataReader("F-F_ST_Reversal_Factor", "famafrench")
"""
start, end = _sanitize_dates(start, end)
if data_source == "yahoo":
return get_data_yahoo(symbols=name, start=start, end=end,
adjust_price=False, chunksize=25,
retry_count=retry_count, pause=pause)
elif data_source == "google":
return get_data_google(symbols=name, start=start, end=end,
adjust_price=False, chunksize=25,
retry_count=retry_count, pause=pause)
elif data_source == "fred":
return get_data_fred(name, start, end)
    elif data_source == "famafrench":
        return get_data_famafrench(name)
    else:
        # Fail loudly instead of silently returning None for an
        # unsupported data_source.
        raise NotImplementedError("data_source=%r is not implemented"
                                  % data_source)
def _sanitize_dates(start, end):
from pandas.core.datetools import to_datetime
start = to_datetime(start)
end = to_datetime(end)
if start is None:
start = dt.datetime(2010, 1, 1)
if end is None:
end = dt.datetime.today()
return start, end
def _in_chunks(seq, size):
"""
Return sequence in 'chunks' of size defined by size
"""
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
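# Editor's illustrative sketch (not part of the original module): _in_chunks
# batches ticker symbols so that at most `size` symbols go into one request.
def _demo_in_chunks():  # example only; the helper name is the editor's
    chunks = list(_in_chunks(["AAPL", "GS", "MSFT", "IBM", "XOM"], 2))
    assert chunks == [["AAPL", "GS"], ["MSFT", "IBM"], ["XOM"]]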
_yahoo_codes = {'symbol': 's', 'last': 'l1', 'change_pct': 'p2', 'PE': 'r',
'time': 't1', 'short_ratio': 's7'}
_YAHOO_QUOTE_URL = 'http://finance.yahoo.com/d/quotes.csv?'
def get_quote_yahoo(symbols):
"""
Get current yahoo quote
Returns a DataFrame
"""
if isinstance(symbols, compat.string_types):
sym_list = symbols
else:
sym_list = '+'.join(symbols)
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
request = ''.join(compat.itervalues(_yahoo_codes)) # code request string
header = list(_yahoo_codes.keys())
data = defaultdict(list)
url_str = _YAHOO_QUOTE_URL + 's=%s&f=%s' % (sym_list, request)
with urlopen(url_str) as url:
lines = url.readlines()
for line in lines:
fields = line.decode('utf-8').strip().split(',')
for i, field in enumerate(fields):
if field[-2:] == '%"':
v = float(field.strip('"%'))
elif field[0] == '"':
v = field.strip('"')
else:
try:
v = float(field)
except ValueError:
v = field
data[header[i]].append(v)
idx = data.pop('symbol')
return DataFrame(data, index=idx)
def get_quote_google(symbols):
raise NotImplementedError("Google Finance doesn't have this functionality")
def _retry_read_url(url, retry_count, pause, name):
for _ in range(retry_count):
time.sleep(pause)
# kludge to close the socket ASAP
try:
with | urlopen(url) | pandas.io.common.urlopen |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1._append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1._append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.Period("2012-02-01", freq="D"),
],
dtype=object,
)
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
# different datetimelike
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
],
dtype=object,
)
res = pi1.append(tdi)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = ps1._append(tds)
tm.assert_series_equal(res, | Series(exp, index=[0, 1, 0, 1]) | pandas.Series |
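# --- Editor's illustrative sketch (not part of the original test module) ---
# Condensed view of the coercion rule exercised above: concatenating a
# datetime64 Series with a timedelta64 Series falls back to object dtype,
# and the elements stay pandas scalars (Timestamp / Timedelta).
import pandas as pd

dt_ser = pd.Series(pd.to_datetime(["2011-01-01", "2011-01-02"]))
td_ser = pd.Series(pd.to_timedelta(["1 days", "2 days"]))
mixed = pd.concat([dt_ser, td_ser], ignore_index=True)
assert mixed.dtype == object
assert isinstance(mixed.iloc[0], pd.Timestamp)
assert isinstance(mixed.iloc[-1], pd.Timedelta)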
import datetime
import logging
import os
import random
import numpy as np
import pandas as pd
import pytest
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch_spec import (
AzureBatchSpec,
GCSBatchSpec,
PathBatchSpec,
RuntimeDataBatchSpec,
S3BatchSpec,
)
from great_expectations.execution_engine import SparkDFExecutionEngine
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.self_check.util import build_spark_engine
from great_expectations.validator.metric_configuration import MetricConfiguration
from tests.expectations.test_util import get_table_columns_metric
from tests.test_utils import create_files_in_directory
try:
pyspark = pytest.importorskip("pyspark")
# noinspection PyPep8Naming
import pyspark.sql.functions as F
from pyspark.sql.types import IntegerType, LongType, Row, StringType
except ImportError:
pyspark = None
F = None
IntegerType = None
LongType = None
StringType = None
Row = None
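# Editor's note: the asserts below call `dataframes_equal`, which is not
# defined or imported in this excerpt. The reconstruction here is an
# assumption based on how it is used (schema plus row-wise equality), not
# necessarily the verbatim upstream helper.
def dataframes_equal(first_table, second_table):
    """Return True if two Spark DataFrames have the same schema and rows."""
    if first_table.schema != second_table.schema:
        return False
    if first_table.collect() != second_table.collect():
        return False
    return True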
@pytest.fixture
def test_sparkdf(spark_session):
def generate_ascending_list_of_datetimes(
n, start_date=datetime.date(2020, 1, 1), end_date=datetime.date(2020, 12, 31)
):
start_time = datetime.datetime(
start_date.year, start_date.month, start_date.day
)
seconds_between_dates = (end_date - start_date).total_seconds()
# noinspection PyUnusedLocal
datetime_list = [
start_time
+ datetime.timedelta(seconds=random.randrange(int(seconds_between_dates)))
for i in range(n)
]
datetime_list.sort()
return datetime_list
k = 120
random.seed(1)
timestamp_list = generate_ascending_list_of_datetimes(
n=k, end_date=datetime.date(2020, 1, 31)
)
date_list = [datetime.date(ts.year, ts.month, ts.day) for ts in timestamp_list]
# noinspection PyUnusedLocal
batch_ids = [random.randint(0, 10) for i in range(k)]
batch_ids.sort()
# noinspection PyUnusedLocal
session_ids = [random.randint(2, 60) for i in range(k)]
session_ids = [i - random.randint(0, 2) for i in session_ids]
session_ids.sort()
# noinspection PyUnusedLocal
spark_df = spark_session.createDataFrame(
data=pd.DataFrame(
{
"id": range(k),
"batch_id": batch_ids,
"date": date_list,
"y": [d.year for d in date_list],
"m": [d.month for d in date_list],
"d": [d.day for d in date_list],
"timestamp": timestamp_list,
"session_ids": session_ids,
"event_type": [
random.choice(["start", "stop", "continue"]) for i in range(k)
],
"favorite_color": [
"#"
+ "".join(
[random.choice(list("0123456789ABCDEF")) for j in range(6)]
)
for i in range(k)
],
}
)
)
spark_df = spark_df.withColumn(
"timestamp", F.col("timestamp").cast(IntegerType()).cast(StringType())
)
return spark_df
def test_reader_fn(spark_session, basic_spark_df_execution_engine):
engine = basic_spark_df_execution_engine
# Testing that can recognize basic csv file
fn = engine._get_reader_fn(reader=spark_session.read, path="myfile.csv")
assert "<bound method DataFrameReader.csv" in str(fn)
# Ensuring that other way around works as well - reader_method should always override path
fn_new = engine._get_reader_fn(reader=spark_session.read, reader_method="csv")
assert "<bound method DataFrameReader.csv" in str(fn_new)
def test_reader_fn_parameters(
spark_session, basic_spark_df_execution_engine, tmp_path_factory
):
base_directory = str(tmp_path_factory.mktemp("test_csv"))
create_files_in_directory(
directory=base_directory,
file_name_list=[
"test-A.csv",
],
)
test_df_small_csv_path = base_directory + "/test-A.csv"
engine = basic_spark_df_execution_engine
fn = engine._get_reader_fn(reader=spark_session.read, path=test_df_small_csv_path)
assert "<bound method DataFrameReader.csv" in str(fn)
test_sparkdf_with_header_param = basic_spark_df_execution_engine.get_batch_data(
PathBatchSpec(
path=test_df_small_csv_path,
data_asset_name="DATA_ASSET",
reader_options={"header": True},
)
).dataframe
assert test_sparkdf_with_header_param.head() == Row(x="1", y="2")
test_sparkdf_with_no_header_param = basic_spark_df_execution_engine.get_batch_data(
PathBatchSpec(path=test_df_small_csv_path, data_asset_name="DATA_ASSET")
).dataframe
assert test_sparkdf_with_no_header_param.head() == Row(_c0="x", _c1="y")
def test_get_domain_records_with_column_domain(
spark_session, basic_spark_df_execution_engine
):
pd_df = pd.DataFrame(
{"a": [1, 2, 3, 4, 5], "b": [2, 3, 4, 5, None], "c": [1, 2, 3, 4, None]}
)
df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in pd_df.to_records(index=False)
],
pd_df.columns.tolist(),
)
engine = basic_spark_df_execution_engine
engine.load_batch_data(batch_id="1234", batch_data=df)
data = engine.get_domain_records(
domain_kwargs={
"column": "a",
"row_condition": 'col("b")<5',
"condition_parser": "great_expectations__experimental__",
}
)
expected_column_pd_df = pd_df.iloc[:3]
expected_column_df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in expected_column_pd_df.to_records(index=False)
],
expected_column_pd_df.columns.tolist(),
)
assert dataframes_equal(
data, expected_column_df
), "Data does not match after getting full access compute domain"
def test_get_domain_records_with_column_pair_domain(
spark_session, basic_spark_df_execution_engine
):
pd_df = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [2, 3, 4, 5, None, 6],
"c": [1, 2, 3, 4, 5, None],
}
)
df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in pd_df.to_records(index=False)
],
pd_df.columns.tolist(),
)
engine = basic_spark_df_execution_engine
engine.load_batch_data(batch_id="1234", batch_data=df)
data = engine.get_domain_records(
domain_kwargs={
"column_A": "a",
"column_B": "b",
"row_condition": 'col("b")>2',
"condition_parser": "great_expectations__experimental__",
"ignore_row_if": "both_values_are_missing",
}
)
expected_column_pair_pd_df = pd.DataFrame(
{"a": [2, 3, 4, 6], "b": [3.0, 4.0, 5.0, 6.0], "c": [2.0, 3.0, 4.0, None]}
)
expected_column_pair_df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in expected_column_pair_pd_df.to_records(index=False)
],
expected_column_pair_pd_df.columns.tolist(),
)
assert dataframes_equal(
data, expected_column_pair_df
), "Data does not match after getting full access compute domain"
pd_df = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [2, 3, 4, 5, None, 6],
"c": [1, 2, 3, 4, 5, None],
}
)
df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in pd_df.to_records(index=False)
],
pd_df.columns.tolist(),
)
engine = basic_spark_df_execution_engine
engine.load_batch_data(batch_id="1234", batch_data=df)
data = engine.get_domain_records(
domain_kwargs={
"column_A": "b",
"column_B": "c",
"row_condition": 'col("b")>2',
"condition_parser": "great_expectations__experimental__",
"ignore_row_if": "either_value_is_missing",
}
)
for column_name in data.columns:
data = data.withColumn(column_name, data[column_name].cast(LongType()))
expected_column_pair_pd_df = pd.DataFrame(
{"a": [2, 3, 4], "b": [3, 4, 5], "c": [2, 3, 4]}
)
expected_column_pair_df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in expected_column_pair_pd_df.to_records(index=False)
],
expected_column_pair_pd_df.columns.tolist(),
)
assert dataframes_equal(
data, expected_column_pair_df
), "Data does not match after getting full access compute domain"
pd_df = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [2, 3, 4, 5, None, 6],
"c": [1, 2, 3, 4, 5, None],
}
)
df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in pd_df.to_records(index=False)
],
pd_df.columns.tolist(),
)
engine = basic_spark_df_execution_engine
engine.load_batch_data(batch_id="1234", batch_data=df)
data = engine.get_domain_records(
domain_kwargs={
"column_A": "b",
"column_B": "c",
"row_condition": 'col("a")<6',
"condition_parser": "great_expectations__experimental__",
"ignore_row_if": "neither",
}
)
expected_column_pair_pd_df = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5],
"b": [2.0, 3.0, 4.0, 5.0, None],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
)
expected_column_pair_df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in expected_column_pair_pd_df.to_records(index=False)
],
expected_column_pair_pd_df.columns.tolist(),
)
assert dataframes_equal(
data, expected_column_pair_df
), "Data does not match after getting full access compute domain"
def test_get_domain_records_with_multicolumn_domain(
spark_session, basic_spark_df_execution_engine
):
pd_df = pd.DataFrame(
{
"a": [1, 2, 3, 4, None, 5],
"b": [2, 3, 4, 5, 6, 7],
"c": [1, 2, 3, 4, None, 6],
}
)
df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in pd_df.to_records(index=False)
],
pd_df.columns.tolist(),
)
engine = basic_spark_df_execution_engine
engine.load_batch_data(batch_id="1234", batch_data=df)
data = engine.get_domain_records(
domain_kwargs={
"column_list": ["a", "c"],
"row_condition": 'col("b")>2',
"condition_parser": "great_expectations__experimental__",
"ignore_row_if": "all_values_are_missing",
}
)
for column_name in data.columns:
data = data.withColumn(column_name, data[column_name].cast(LongType()))
expected_multicolumn_pd_df = pd.DataFrame(
{"a": [2, 3, 4, 5], "b": [3, 4, 5, 7], "c": [2, 3, 4, 6]}, index=[0, 1, 2, 4]
)
expected_multicolumn_df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in expected_multicolumn_pd_df.to_records(index=False)
],
expected_multicolumn_pd_df.columns.tolist(),
)
engine = basic_spark_df_execution_engine
engine.load_batch_data(batch_id="1234", batch_data=expected_multicolumn_df)
assert dataframes_equal(
data, expected_multicolumn_df
), "Data does not match after getting full access compute domain"
pd_df = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [2, 3, 4, 5, None, 6],
"c": [1, 2, 3, 4, 5, None],
}
)
df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in pd_df.to_records(index=False)
],
pd_df.columns.tolist(),
)
engine = basic_spark_df_execution_engine
engine.load_batch_data(batch_id="1234", batch_data=df)
data = engine.get_domain_records(
domain_kwargs={
"column_list": ["b", "c"],
"row_condition": 'col("a")<5',
"condition_parser": "great_expectations__experimental__",
"ignore_row_if": "any_value_is_missing",
}
)
for column_name in data.columns:
data = data.withColumn(column_name, data[column_name].cast(LongType()))
expected_multicolumn_pd_df = pd.DataFrame(
{"a": [1, 2, 3, 4], "b": [2, 3, 4, 5], "c": [1, 2, 3, 4]}, index=[0, 1, 2, 3]
)
expected_multicolumn_df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in expected_multicolumn_pd_df.to_records(index=False)
],
expected_multicolumn_pd_df.columns.tolist(),
)
assert dataframes_equal(
data, expected_multicolumn_df
), "Data does not match after getting full access compute domain"
pd_df = pd.DataFrame(
{
"a": [1, 2, 3, 4, None, 5],
"b": [2, 3, 4, 5, 6, 7],
"c": [1, 2, 3, 4, None, 6],
}
)
df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in pd_df.to_records(index=False)
],
pd_df.columns.tolist(),
)
engine = basic_spark_df_execution_engine
engine.load_batch_data(batch_id="1234", batch_data=df)
data = engine.get_domain_records(
domain_kwargs={
"column_list": ["b", "c"],
"ignore_row_if": "never",
}
)
expected_multicolumn_pd_df = pd.DataFrame(
{
"a": [1, 2, 3, 4, None, 5],
"b": [2, 3, 4, 5, 6, 7],
"c": [1, 2, 3, 4, None, 6],
},
index=[0, 1, 2, 3, 4, 5],
)
expected_multicolumn_df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in expected_multicolumn_pd_df.to_records(index=False)
],
expected_multicolumn_pd_df.columns.tolist(),
)
assert dataframes_equal(
data, expected_multicolumn_df
), "Data does not match after getting full access compute domain"
def test_get_compute_domain_with_no_domain_kwargs(
spark_session, basic_spark_df_execution_engine
):
pd_df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]})
df = spark_session.createDataFrame(
[
tuple(
None if isinstance(x, (float, int)) and np.isnan(x) else x
for x in record.tolist()
)
for record in pd_df.to_records(index=False)
],
pd_df.columns.tolist(),
)
engine = basic_spark_df_execution_engine
engine.load_batch_data(batch_id="1234", batch_data=df)
data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
domain_kwargs={}, domain_type=MetricDomainTypes.TABLE
)
assert compute_kwargs is not None, "Compute domain kwargs should be existent"
assert accessor_kwargs == {}
assert data.schema == df.schema
assert data.collect() == df.collect()
def test_get_compute_domain_with_column_domain(
spark_session, basic_spark_df_execution_engine
):
pd_df = | pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri May 14 14:11:23 2021
@author: freeridingeo
"""
import numpy as np
import xarray as xr
import pandas as pd
from eolearn.core import FeatureParser
import sys
sys.path.append("D:/Code/eotopia/metadata")
from metadata_geographical import (get_eopatch_coordinates)
from metadata_eopatch import get_feature_dimensions
sys.path.append("D:/Code/eotopia/utils")
from string_utils import string_to_variable
import matplotlib.pyplot as plt
def filter_nan(s,o):
data = np.transpose(np.array([s.flatten(),o.flatten()]))
data = data[~np.isnan(data).any(1)]
return data[:,0], data[:,1]
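# Editor's illustrative sketch (not part of the original module): filter_nan
# drops every position where either input array holds a NaN and returns the
# two aligned, NaN-free vectors.
def _demo_filter_nan():  # example only; the helper name is the editor's
    s = np.array([1.0, np.nan, 3.0])
    o = np.array([0.9, 2.0, np.nan])
    s_clean, o_clean = filter_nan(s, o)
    assert s_clean.tolist() == [1.0] and o_clean.tolist() == [0.9]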
def extract_difference_between_columns(df1, df2, col1, col2):
common = df1.merge(df2,on=[col1,col2])
diff = df1[(~df1[col1].isin(common[col1])) & (~df1[col2].isin(common[col2]))]
return diff
def df_grouby_and_count(df, col):
group_by = df.groupby(by=[col])
col_avg = group_by.mean()
col_count = group_by.count()
print("Mean value of " + str(col) + " is ", col_avg)
print("Number of " + str(col) + " is ", col_count)
def concatenate_dfs(list_of_dfs, kind="by_append"):
if kind == "by_append":
df_conc = pd.concat(list_of_dfs)
elif kind == "matrixed":
df_conc = pd.concat(list_of_dfs, axis=1)
return df_conc
def merge_dfs(df1, df2, colname, kind="inner"):
"""
    kind: how to merge. Options:
        "inner": keep only rows whose key in `colname` appears in both frames
        "outer": keep all rows from both frames, not just the common ones
        "left" / "right": keep all rows from df1 / df2, respectively
"""
merged_df = pd.merge(df1, df2, how=kind, on=colname)
return merged_df
def join_dfs(df1, df2, kind="inner"):
"""
Joining is a convenient method for combining the columns of two potentially
differently-indexed DataFrames into a single result DataFrame.
    kind: how to join. Options:
        "inner": keep only rows whose join keys appear in both frames
        "outer": keep all rows from both frames, not just the common ones
"""
join_df = pd.merge(df1, df2, how=kind)
return join_df
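# Editor's illustrative sketch (not part of the original module): the effect
# of the `kind` argument on merge_dfs for a single shared key column.
def _demo_merge_kinds():  # example only; the helper name is the editor's
    left = pd.DataFrame({"id": [1, 2], "a": ["x", "y"]})
    right = pd.DataFrame({"id": [2, 3], "b": ["u", "v"]})
    assert len(merge_dfs(left, right, "id", kind="inner")) == 1  # only id=2
    assert len(merge_dfs(left, right, "id", kind="outer")) == 3  # ids 1, 2, 3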
def identify_unique(dataframe):
unique_counts = dataframe.nunique()
unique_stats = | pd.DataFrame(unique_counts) | pandas.DataFrame |
import argparse
import torch
import numpy as np
import pandas as pd
import pickle as pkl
from tqdm import tqdm
from torch.utils.data import DataLoader
from sklearn import metrics
from sklearn.model_selection import train_test_split
from dataset_node import construct_dataset, mol_collate_func
from transformer_node import make_model
from utils import ScheduledOptim, get_options
def loss_function(y_true, y_pred):
y_true, y_pred = y_true.flatten(), y_pred.flatten()
y_mask = torch.where(y_true != 0., torch.full_like(y_true, 1), torch.full_like(y_true, 0))
loss = torch.sum(torch.abs(y_true - y_pred * y_mask)) / torch.sum(y_mask)
return loss
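# Editor's illustrative sketch (not part of the original script): loss_function
# is a masked mean absolute error - positions whose label is exactly 0
# (treated as "no label" here) contribute nothing, and the sum is normalised
# by the number of labelled positions only.
def _demo_masked_loss():  # example only; the helper name is the editor's
    y_true = torch.tensor([[2.0], [0.0], [4.0]])  # 0.0 marks an unlabelled position
    y_pred = torch.tensor([[1.5], [9.9], [4.5]])
    # (|2.0 - 1.5| + |4.0 - 4.5|) / 2 labelled positions -> 0.5
    assert torch.isclose(loss_function(y_true, y_pred), torch.tensor(0.5))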
def model_train(model, train_dataset, valid_dataset, model_params, train_params, dataset_name, element):
train_loader = DataLoader(dataset=train_dataset, batch_size=train_params['batch_size'], collate_fn=mol_collate_func,
shuffle=True, drop_last=True, num_workers=4, pin_memory=True)
valid_loader = DataLoader(dataset=valid_dataset, batch_size=train_params['batch_size'], collate_fn=mol_collate_func,
shuffle=True, drop_last=True, num_workers=4, pin_memory=True)
# build optimizer
optimizer = ScheduledOptim(torch.optim.Adam(model.parameters(), lr=0),
train_params['warmup_factor'], model_params['d_model'],
train_params['total_warmup_steps'])
best_valid_loss = float('inf')
best_epoch = -1
best_valid_result = dict()
for epoch in range(train_params['total_epochs']):
# train
train_loss = list()
model.train()
for batch in tqdm(train_loader):
adjacency_matrix, node_features, edge_features, y_true = batch
adjacency_matrix = adjacency_matrix.to(train_params['device']) # (batch_size, max_length, max_length)
node_features = node_features.to(train_params['device']) # (batch_size, max_length, d_node)
edge_features = edge_features.to(train_params['device']) # (batch_size, max_length, max_length, d_edge)
y_true = y_true.to(train_params['device']) # (batch_size, max_length, 1)
batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0 # (batch_size, max_length)
# (batch_size, max_length, 1)
y_pred = model(node_features, batch_mask, adjacency_matrix, edge_features)
loss = loss_function(y_true, y_pred)
optimizer.zero_grad()
loss.backward()
optimizer.step_and_update_lr()
train_loss.append(loss.detach().item())
# valid
model.eval()
with torch.no_grad():
valid_result = dict()
valid_result['label'], valid_result['prediction'], valid_result['loss'] = list(), list(), list()
for batch in tqdm(valid_loader):
adjacency_matrix, node_features, edge_features, y_true = batch
adjacency_matrix = adjacency_matrix.to(train_params['device']) # (batch_size, max_length, max_length)
node_features = node_features.to(train_params['device']) # (batch_size, max_length, d_node)
edge_features = edge_features.to(train_params['device']) # (batch_size, max_length, max_length, d_edge)
batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0 # (batch_size, max_length)
# (batch_size, max_length, 1)
y_pred = model(node_features, batch_mask, adjacency_matrix, edge_features)
y_true = y_true.numpy().flatten()
y_pred = y_pred.cpu().detach().numpy().flatten()
y_mask = np.where(y_true != 0., 1, 0)
times = 0
for true, pred in zip(y_true, y_pred):
if true != 0.:
times += 1
valid_result['label'].append(true)
valid_result['prediction'].append(pred)
valid_result['loss'].append(np.abs(true - pred))
assert times == np.sum(y_mask)
valid_result['r2'] = metrics.r2_score(valid_result['label'], valid_result['prediction'])
print('Epoch {}, learning rate {:.6f}, train loss: {:.4f}, valid loss: {:.4f}, valid r2: {:.4f}'.format(
epoch + 1, optimizer.view_lr(), np.mean(train_loss), np.mean(valid_result['loss']), valid_result['r2']
))
# save the model and valid result
if np.mean(valid_result['loss']) < best_valid_loss:
best_valid_loss = np.mean(valid_result['loss'])
best_epoch = epoch + 1
best_valid_result = valid_result
torch.save({'state_dict': model.state_dict(),
'best_epoch': best_epoch, 'best_valid_loss': best_valid_loss},
f'./Model/{dataset_name}/best_model_{dataset_name}_{element}.pt')
# temp test
if (epoch + 1) % 10 == 0:
checkpoint = torch.load(f'./Model/{dataset_name}/best_model_{dataset_name}_{element}.pt')
print('=' * 20 + ' middle test ' + '=' * 20)
test_result = model_test(checkpoint, test_dataset, model_params, train_params)
print("best epoch: {}, best valid loss: {:.4f}, test loss: {:.4f}, test r2: {:.4f}".format(
checkpoint['best_epoch'], checkpoint['best_valid_loss'], np.mean(test_result['loss']), test_result['r2']
))
print('=' * 40)
# early stop
if abs(best_epoch - epoch) >= 20:
print("=" * 20 + ' early stop ' + "=" * 20)
break
return best_valid_result
def model_test(checkpoint, test_dataset, model_params, train_params):
# build loader
test_loader = DataLoader(dataset=test_dataset, batch_size=train_params['batch_size'], collate_fn=mol_collate_func,
shuffle=False, drop_last=True, num_workers=4, pin_memory=True)
# build model
model = make_model(**model_params)
model.to(train_params['device'])
model.load_state_dict(checkpoint['state_dict'])
# test
model.eval()
with torch.no_grad():
test_result = dict()
test_result['label'], test_result['prediction'], test_result['loss'] = list(), list(), list()
for batch in tqdm(test_loader):
adjacency_matrix, node_features, edge_features, y_true = batch
adjacency_matrix = adjacency_matrix.to(train_params['device']) # (batch_size, max_length, max_length)
node_features = node_features.to(train_params['device']) # (batch_size, max_length, d_node)
edge_features = edge_features.to(train_params['device']) # (batch_size, max_length, max_length, d_edge)
batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0 # (batch_size, max_length)
# (batch_size, max_length, 1)
y_pred = model(node_features, batch_mask, adjacency_matrix, edge_features)
y_true = y_true.numpy().flatten()
y_pred = y_pred.cpu().detach().numpy().flatten()
y_mask = np.where(y_true != 0., 1, 0)
times = 0
for true, pred in zip(y_true, y_pred):
if true != 0.:
times += 1
test_result['label'].append(true)
test_result['prediction'].append(pred)
test_result['loss'].append(np.abs(true - pred))
assert times == np.sum(y_mask)
test_result['r2'] = metrics.r2_score(test_result['label'], test_result['prediction'])
test_result['best_valid_loss'] = checkpoint['best_valid_loss']
return test_result
if __name__ == '__main__':
# init args
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, help="random seeds", default=np.random.randint(10000))
parser.add_argument("--gpu", type=str, help='gpu', default=-1)
parser.add_argument("--dataset", type=str, help='nmrshiftdb/DFT8K_DFT/DFT8K_FF/Exp5K_DFT/Exp5K_FF', default='nmrshiftdb')
parser.add_argument("--element", type=str, help="1H/13C", default='1H')
args = parser.parse_args()
# load options
model_params, train_params = get_options(args.dataset)
# init device and seed
print(f"Seed: {args.seed}")
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
train_params['device'] = torch.device(f'cuda:{args.gpu}')
torch.cuda.manual_seed(args.seed)
else:
train_params['device'] = torch.device('cpu')
# load data
with open(f'./Data/{args.dataset}/preprocess/graph_{args.element}_train.pickle', 'rb') as f:
[train_all_mol, train_all_cs] = pkl.load(f)
with open(f'./Data/{args.dataset}/preprocess/graph_{args.element}_test.pickle', 'rb') as f:
[test_mol, test_cs] = pkl.load(f)
print('=' * 20 + ' begin train ' + '=' * 20)
# calculate the padding
model_params['max_length'] = max(max([data.GetNumAtoms() for data in train_all_mol]),
max([data.GetNumAtoms() for data in test_mol]))
print(f"Max padding length is: {model_params['max_length']}")
# split dataset
if args.dataset == 'nmrshiftdb':
train_mol, valid_mol, train_cs, valid_cs = train_test_split(
train_all_mol, train_all_cs, test_size=0.05, random_state=args.seed)
else:
train_mol, valid_mol, train_cs, valid_cs = train_test_split(
train_all_mol, train_all_cs, test_size=500, random_state=args.seed)
# load dataset, data_mean=0, data_std=1 for no use
train_dataset = construct_dataset(train_mol, train_cs, model_params['d_atom'], model_params['d_edge'],
model_params['max_length'])
valid_dataset = construct_dataset(valid_mol, valid_cs, model_params['d_atom'], model_params['d_edge'],
model_params['max_length'])
test_dataset = construct_dataset(test_mol, test_cs, model_params['d_atom'], model_params['d_edge'],
model_params['max_length'])
# calculate total warmup factor and steps
train_params['warmup_factor'] = 0.2 if args.element == '1H' else 1.0
train_params['total_warmup_steps'] = \
int(len(train_dataset) / train_params['batch_size']) * train_params['total_warmup_epochs']
print('train warmup step is: {}'.format(train_params['total_warmup_steps']))
# define a model
model = make_model(**model_params)
model = model.to(train_params['device'])
# train and valid
print(f"train size: {len(train_dataset)}, valid size: {len(valid_dataset)}, test size: {len(test_dataset)}")
best_valid_result = model_train(model, train_dataset, valid_dataset, model_params, train_params, args.dataset, args.element)
best_valid_csv = | pd.DataFrame.from_dict({'actual': best_valid_result['label'], 'predict': best_valid_result['prediction'], 'loss': best_valid_result['loss']}) | pandas.DataFrame.from_dict |
# %%
import os
import pandas as pd
import numpy as np
import datetime
# %%
BBDD500000 = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVIES 500.000.csv')
BBDD1xls = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 1xls.csv')
BBDD1xlsx = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 1xlsx.csv')
BBDD2xls = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 2xls.csv')
BBDD2xlsx = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 2xlsx.csv')
BBDD3 = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 3.csv')
BBDD4xls = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 4xls.csv')
BBDD4xlsx = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 4xlsx.csv')
BBDD5 = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 5.csv')
BBDD6 = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 6.csv')
BBDD7 = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 7.csv')
BBDD8 = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 8.csv')
BBDD9 = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 9.csv')
BBDD10 = pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 10.csv')
BBDD11 = | pd.read_csv(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV\BBDD AUTOMÓVILES 11.csv') | pandas.read_csv |
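# --- Editor's illustrative sketch (not part of the original script) ---
# The repetitive pd.read_csv calls above can be driven by a glob over the CSV
# directory instead; the directory path below mirrors the one used above.
import glob
import os
import pandas as pd

CSV_DIR = r"D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\CSV"
frames = {
    os.path.splitext(os.path.basename(path))[0]: pd.read_csv(path)
    for path in glob.glob(os.path.join(CSV_DIR, "*.csv"))
}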
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
from six import string_types
import pandas as pd
import numpy as np
import gmeterpy.units as u
__all__ = ["Readings"]
class Readings:
def __init__(self, data=None, meta=None, units=None, corrections=None,
**kwargs):
if data is not None:
self._data = data
else:
self._data = pd.DataFrame()
if meta is None:
self._meta = {}
else:
self._meta = meta
self._corrections = corrections
if corrections is None:
self._corrections = {key: (key, {}) for key in self._data.columns
if 'c_' in key}
else:
self._corrections = corrections
self._proc = kwargs.pop('proc', {})
if units is None:
self.units = {}
else:
self.units = units
if not self._data.empty:
self._update_g_result()
self._data['jd'] = self._data.index.to_julian_date()
@property
def data(self):
return self._data
@data.setter
def data(self, d):
self._data = d
@property
def meta(self):
return self._meta
@property
def corrections(self):
return self._corrections
@property
def columns(self):
"""Return all column labels in the data.
"""
return list(self._data.columns.values)
@property
def index(self):
"""Return index of the data.
"""
return self._data.index
def copy(self):
return copy.deepcopy(self)
def quantity(self, column, **kwargs):
"""Return a `~astropy.units.Quantity` for the given column.
Parameters
----------
column : str
The column name to return as Quantity.
"""
# TODO: multiple columns
# TODO: add error handling (no units)
values = self._data[column].values
unit = self.units[column]
return u.Quantity(values, unit)
def add_quantity_column(self, column, quantity):
"""Add values and units from `~astropy.units.Quantity`.
Parameters
----------
column : str
The column name to store values.
quantity : `~astropy.units.Quantity`
The column name to store values.
"""
self._data[column] = quantity.value
self.units[column] = quantity.unit
def mask(self, column, minv, maxv):
cc = self._data[column]
c = ((cc <= minv) & (cc >= maxv))
self._data = self._data.mask(c)
return self
def filter(self, minv, maxv, column='g_result', groupby=None):
cc = self._data[column]
c = ((cc >= minv) & (cc <= maxv))
self._data = self._data[c]
return self
def split(self, *args, **kwargs):
splitted = []
for n, group in self._data.groupby(*args, **kwargs):
group_n = self.__class__(group.copy(), meta=self.meta)
splitted.append(group_n)
return splitted
def truncate(self, by=None, before=0, after=0):
data = self._data.reset_index()
data = data.groupby(by).apply(lambda x: x.iloc[before:(len(x) -
after)]).reset_index(drop=True)
self._data = data.set_index('time')
self._proc['truncate_before'] = before
self._proc['truncate_after'] = after
return self
# start corrections
def set_correction(self, name, value=0.0, **kwargs):
self._corrections[name] = (value, kwargs)
self._update_g_result()
def _update_correction(self, name):
value, kwargs = copy.deepcopy(self._corrections[name])
if hasattr(value, '__call__'):
for key in kwargs:
if isinstance(kwargs[key],
string_types) and hasattr(self.data, kwargs[key]):
#kwargs[key] = getattr(self.data, kwargs[key]).copy()
kwargs[key] = self.quantity(kwargs[key])
value = value(**kwargs)
if isinstance(value, (int, float, list, np.ndarray)):
self._data[name] = value
elif isinstance(value, pd.Series):
if isinstance(value.index, pd.DatetimeIndex):
self._data[name] = self.interpolate_from_ts(value).values
else:
self._data[name] = value.values
else:
self._data[name] = getattr(self._data, value)
def del_correction(self, name, drop=True):
if drop:
del self._data[name]
del self._corrections[name]
self._update_g_result()
def _update_g_result(self):
self._data['g_result'] = self._data.g
for key in self._corrections:
self._update_correction(key)
self._data.g_result += self.data[key]
def interpolate_from_ts(self, ts):
idx = | pd.Series(index=self._data.index) | pandas.Series |
import os
import errno
import joblib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas.plotting import table
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
r2_score,
mean_squared_error,
)
from time import time
from calculate_stats import CalculateStats
from linear_regression import LinearRegression
from multilayer_perceptron import MultilayerPerceptron
from random_forest import RandomForest
DATASET_PATH = "../datasets/train.csv"
MODELS_DIR = "../models"
RESOURCES_PATH = "../resources/"
def clean_data(path):
data_frame = pd.read_csv(path)
# fill missing data for numeric features
numeric_features = data_frame.select_dtypes(include=[np.number])
for feature in numeric_features:
data_frame[feature].fillna(data_frame[feature].mean(), inplace=True)
# convert to numeric
non_numeric_features = data_frame.select_dtypes(exclude=[np.number])
for feature in non_numeric_features:
mapping = {value: i for i, value in enumerate(data_frame[feature].unique())}
data_frame[feature] = data_frame[feature].replace(
mapping.keys(), mapping.values()
)
    # disregard unimportant features
data_frame.drop(["Id"], axis=1, inplace=True)
save_file_name = os.path.dirname(path) + os.sep + "house_prices_cleaned.csv"
data_frame.to_csv(save_file_name, encoding="utf-8", index=False)
return save_file_name
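# Editor's illustrative sketch (not part of the original script): the label
# encoding used in clean_data simply enumerates the unique values of each
# non-numeric column in order of first appearance.
def _demo_label_mapping():  # example only; the helper name is the editor's
    values = pd.Series(["Pave", "Grvl", "Pave"])
    mapping = {value: i for i, value in enumerate(values.unique())}
    assert mapping == {"Pave": 0, "Grvl": 1}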
def split_data(path):
data_frame = pd.read_csv(path)
x = data_frame.loc[:, data_frame.columns != "SalePrice"]
y = data_frame.loc[:, data_frame.columns == "SalePrice"]
train_test_data = train_test_split(x, y, test_size=1 / 3, random_state=85)
dir_path = os.path.dirname(path) + os.sep
paths = [
dir_path + file_name
for file_name in [
"train_features.csv",
"test_features.csv",
"train_labels.csv",
"test_labels.csv",
]
]
for data, path in zip(train_test_data, paths):
data.to_csv(path, index=False)
return paths
def train_models(models, path, features_path, labels_path):
return [model(path, features_path, labels_path).get_path() for model in models]
def compare_results(models_paths, save_path, features_path, labels_path):
features = pd.read_csv(features_path)
labels = pd.read_csv(labels_path)
results = {
"Model": [],
"R^2": [],
"NRMSE": [],
"Latency": [],
}
for path in models_paths:
model = joblib.load(path)
start = time()
prediction = model.predict(features)
end = time()
results["Model"].append(os.path.splitext(os.path.basename(path))[0])
results["R^2"].append(round(r2_score(labels, prediction.round()), 3))
results["NRMSE"].append(
round(
np.sqrt(mean_squared_error(labels, prediction.round()))
/ np.std(prediction.round()),
3,
)
)
results["Latency"].append(round((end - start) * 1000, 1))
df = | pd.DataFrame(results) | pandas.DataFrame |
from collections import Counter, deque, namedtuple
import os
import itertools
import warnings
import copy
from operator import itemgetter
import math
import pandas as pd
from pandas.api.types import is_numeric_dtype, is_string_dtype
import sklearn.datasets
from sklearn.metrics import f1_score
import numpy as np
import scripts.vars as my_vars
class MyException(Exception):
pass
# (ID of rule, distance of rule to the closest example) is stored per example in a named tuple
Data = namedtuple("Data", ["rule_id", "dist"])
Bounds = namedtuple("Bounds", ["lower", "upper"])
Support = namedtuple("Support", ["minority", "majority"])
Predictions = namedtuple("Predictions", ["label", "confidence"])
# Keep original rule and delete the corresponding duplicate rule which is at duplicate_idx in the list of rules
Duplicates = namedtuple("Duplicates", ["original", "duplicate", "duplicate_idx"])
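# Editor's illustrative sketch (not part of the original module): how the
# named tuples above are meant to be populated.
def _demo_namedtuples():  # example only; the helper name is the editor's
    closest = Data(rule_id=3, dist=0.12)           # nearest rule for one example
    feature_range = Bounds(lower=1.0, upper=2.5)   # numeric interval covered by a rule
    support = Support(minority=0.8, majority=0.2)  # field names suggest per-class support
    assert closest.rule_id == 3 and feature_range.upper == 2.5 and support.minority == 0.8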
def read_dataset(src, positive_class, excluded=[], skip_rows=0, na_values=[], normalize=False, class_index=-1,
header=True):
"""
Reads in a dataset in csv format and stores it in a dataFrame.
Parameters
----------
src: str - path to input dataset
positive_class: str - name of the minority class. The rest is treated as negative.
excluded: list of int - 0-based indices of columns/features to be ignored
skip_rows: int - number of rows to skip at the beginning of <src>
na_values: list of str - values encoding missing values - these are represented by NaN
normalize: bool - True if numeric values should be in range between 0 and 1
class_index: int - 0-based index where class label is stored in dataset. -1 = last column
header: bool - True if header row is included in the dataset else False
Returns
-------
pd.DataFrame, dict, pd.DataFrame, pd.DataFrame - dataset, SVDM lookup matrix which contains for nominal
classes how often the value of a feature co-occurs with each class label, initial rule set, min/max values per
numeric column
"""
my_vars.minority_class = positive_class
# Add column names
if header:
df = pd.read_csv(src, skiprows=skip_rows, na_values=na_values)
else:
df = pd.read_csv(src, skiprows=skip_rows, na_values=na_values, header=None)
df.columns = [i for i in range(len(df.columns))]
lookup = {}
# Convert fancy index to regular index - otherwise the loop below won't skip the column with class labels
if class_index < 0:
class_index = len(df.columns) + class_index
my_vars.CLASSES = df.iloc[:, class_index].unique()
class_col_name = df.columns[class_index]
rules = extract_initial_rules(df, class_col_name)
minmax = {}
# Create lookup matrix for nominal features for SVDM + normalize numerical features columnwise, but ignore labels
for col_name in df:
if col_name == class_col_name:
continue
col = df[col_name]
if is_numeric_dtype(col):
if normalize:
df[col_name] = normalize_series(col)
minmax[col_name] = {"min": col.min(), "max": col.max()}
else:
lookup[col_name] = create_svdm_lookup_column(df, col, class_col_name)
min_max = pd.DataFrame(minmax)
return df, lookup, rules, min_max
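# Example invocation of read_dataset() (the file name, class label, and excluded column are
# placeholders that only illustrate the signature above):
#
# df, lookup, rules, min_max = read_dataset(
#     "my_dataset.csv", positive_class="yes", excluded=[0], normalize=True, class_index=-1)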
def extract_initial_rules(df, class_col_name):
"""
Creates the initial rule set for a given dataset, which corresponds to the examples in the dataset, i.e.
lower and upper bound (of numeric) features are the same. Note that for every feature in the dataset,
two values are stored per rule, namely lower and upper bound. For example, if we have
A B ... A B ...
---------- in the dataset, the corresponding rule stores: ----------------------------------
1.0 x ... Bounds(lower=1.0, upper=1.0) x
Parameters
----------
df: pd.DataFrame - dataset
class_col_name: str - name of the column holding the class labels
Returns
-------
pd.DataFrame.
Rule set
"""
rules = df.copy()
for col_name in df:
if col_name == class_col_name:
continue
col = df[col_name]
if is_numeric_dtype(col):
# Assuming the value is x, we now store named tuples of Bounds(lower=x, upper=x) per row
# rules[col_name] = [tuple([row[col_name], row[col_name]]) for _, row in df.iterrows()]
rules[col_name] = [Bounds(lower=row[col_name], upper=row[col_name]) for _, row in df.iterrows()]
return rules
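# Tiny illustration of the rule layout described in the docstring above (hypothetical
# two-column frame): numeric values become Bounds with identical lower/upper ends, while
# nominal values are kept as-is.
#
# toy = pd.DataFrame({"A": [1.0], "B": ["x"], "class": ["pos"]})
# extract_initial_rules(toy, "class").iloc[0]["A"]   -> Bounds(lower=1.0, upper=1.0)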
def add_tags_and_extract_rules(df, k, class_col_name, counts, min_max, classes):
"""
    Extracts initial rules and assigns each example in the dataset a tag: "SAFE", "NOISY", or "BORDERLINE"
    (UNSAFE examples are refined into either "NOISY" or "BORDERLINE").
Parameters
----------
df: pd.DataFrame - dataset
k: int - number of neighbors to consider
class_col_name: str - name of class label
counts: dict - lookup table for SVDM
    min_max: pd.DataFrame - contains min/max values per numeric feature
classes: list of str - class labels in the dataset.
Returns
-------
pd.DataFrame, list of pd.Series.
Dataset with an additional column containing the tag, initially extracted rules.
"""
rules_df = extract_initial_rules(df, class_col_name)
# my_vars.latest_rule_id = rules_df.shape[0] - 1
# 1 rule per example
# assert(rules_df.shape[0] == df.shape[0])
# The next 2 lines assume that the 1st example starts with ID 0 which isn't necessarily true
# my_vars.seed_example_rule = dict((x, {x}) for x in range(rules_df.shape[0]))
# my_vars.seed_rule_example = dict((x, x) for x in range(rules_df.shape[0]))
for rule_id, _ in rules_df.iterrows():
my_vars.seed_rule_example[rule_id] = rule_id
my_vars.seed_example_rule[rule_id] = {rule_id}
# Don't store that seeds are covered by initial rules - that's given implicitly
# my_vars.examples_covered_by_rule = dict((x, {x}) for x in range(rules_df.shape[0]))
rules = []
for rule_id, rule in rules_df.iterrows():
# TODO: convert tuples into Bounds
# converted_rule = pd.Series(name=rule_id)
# for feat_name, val in rule.iteritems():
# if isinstance(val, Bounds):
# print("convert {} to Bounds".format(val))
# lower, upper = val
# converted_rule[feat_name] = Bounds(lower=lower, upper=upper)
# print(converted_rule[feat_name])
# print(isinstance(converted_rule[feat_name], Bounds))
# else:
# converted_rule[feat_name] = val
# print("converted rule")
# print(converted_rule)
rules.append(rule)
my_vars.all_rules[rule_id] = rule
tagged = add_tags(df, k, rules, class_col_name, counts, min_max, classes)
rules = deque(rules)
return tagged, rules
def add_tags(df, k, rules, class_col_name, counts, min_max, classes):
"""
    Assigns each example in the dataset a tag: "SAFE", "NOISY", or "BORDERLINE" (an UNSAFE example ends up as one of the latter two).
SAFE: example is classified correctly when looking at its k neighbors
UNSAFE: example is misclassified when looking at its k neighbors
NOISY: example is UNSAFE and all its k neighbors belong to the opposite class
BORDERLINE: example is UNSAFE and it's not NOISY.
Assumes that <df> contains at least 2 rows.
Parameters
----------
df: pd.DataFrame - dataset
k: int - number of neighbors to consider
rules: list of pd.Series - list of rules
class_col_name: str - name of class label
counts: dict - lookup table for SVDM
    min_max: pd.DataFrame - contains min/max values per numeric feature
classes: list of str - class labels in the dataset.
Returns
-------
pd.DataFrame.
Dataset with an additional column containing the tag.
"""
tags = []
for rule in rules:
print(rule)
rule_id = rule.name
# Ignore current row
examples_for_pairwise_distance = df.loc[df.index != rule_id]
if examples_for_pairwise_distance.shape[0] > 0:
# print("pairwise distances for rule {}:".format(rule.name))
# print("compute distance to:\n{}".format(examples_for_pairwise_distance))
neighbors, _, _ = find_nearest_examples(examples_for_pairwise_distance, k, rule, class_col_name, counts,
min_max, classes, label_type=my_vars.ALL_LABELS,
only_uncovered_neighbors=False)
# print("neighbors:\n{}".format(neighbors))
labels = Counter(neighbors[class_col_name].values)
tag = assign_tag(labels, rule[class_col_name])
# print("=>", tag)
tags.append(tag)
df[my_vars.TAG] = pd.Series(tags)
return df
def assign_tag(labels, label):
"""
Assigns a tag to an example ("safe", "noisy" or "borderline").
Parameters
----------
labels: collections.Counter - frequency of labels
label: str - label of the example
Returns
-------
string.
Tag, either "safe", "noisy" or "borderline".
"""
total_labels = sum(labels.values())
frequencies = labels.most_common(2)
# print(frequencies)
most_common = frequencies[0]
tag = my_vars.SAFE
if most_common[1] == total_labels and most_common[0] != label:
tag = my_vars.NOISY
elif most_common[1] < total_labels:
second_most_common = frequencies[1]
# print("most common: {} 2nd most common: {}".format(most_common, second_most_common))
# Tie
if most_common[1] == second_most_common[1] or most_common[0] != label:
tag = my_vars.BORDERLINE
# print("neighbor labels: {} vs. {}".format(labels, label))
# print("tag:", tag)
return tag
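# Illustration of the tagging rules above with k = 5 hypothetical neighbors of an example
# labelled "pos" (the label strings are made up; the returned constants come from my_vars):
#
# assign_tag(Counter({"neg": 5}), "pos")            -> my_vars.NOISY       (all neighbors disagree)
# assign_tag(Counter({"neg": 3, "pos": 2}), "pos")  -> my_vars.BORDERLINE  (majority disagrees)
# assign_tag(Counter({"pos": 4, "neg": 1}), "pos")  -> my_vars.SAFE        (majority agrees)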
def normalize_dataframe(df):
"""Normalize numeric features (=columns) using min-max normalization"""
for col_name in df.columns:
col = df[col_name]
if is_numeric_dtype(col):
min_val = col.min()
max_val = col.max()
df[col_name] = (col - min_val) / (max_val - min_val)
return df
def normalize_series(col):
"""Normalizes a given series assuming it's data type is numeric"""
if is_numeric_dtype(col):
min_val = col.min()
max_val = col.max()
col = (col - min_val) / (max_val - min_val)
return col
def create_svdm_lookup_column(df, coli, class_col_name):
"""
Create sparse lookup table for the feature representing the current column i.
N(xi), N(yi), N(xi, Kj), N(yi, Kj), is stored per nominal feature, where N(xi) and N(yi) are the numbers of
examples for which the value on i-th feature (coli) is equal to xi and yi respectively, N(xi , Kj) and N(yi,
Kj) are the numbers of examples from the decision class Kj , which belong to N(xi) and N(yi), respectively
Parameters
----------
df: pd.DataFrame - dataset.
coli: pd.Series - i-th column (= feature) of the dataset
class_col_name: str - name of class label in <df>.
Returns
-------
dict - sparse dictionary holding the non-zero counts of all values of <coli> with the class labels
"""
c = {}
nxiyi = Counter(coli.values)
c.update(nxiyi)
c[my_vars.CONDITIONAL] = {}
# print("N(xi/yi)\n", nxiyi)
unique_xiyi = nxiyi.keys()
# Create all pairwise combinations of two values
combinations = itertools.combinations(unique_xiyi, 2)
for combo in combinations:
for val in combo:
# print("current value:\n", val)
rows_with_val = df.loc[coli == val]
# print("rows with value:\n", rows_with_val)
# nxiyikj = Counter(rows_with_val.iloc[:, class_idx].values)
nxiyikj = Counter(rows_with_val[class_col_name].values)
# print("counts:\n", nxiyikj)
c[my_vars.CONDITIONAL][val] = nxiyikj
return c
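# Sketch of the lookup produced above for a toy nominal column (hypothetical data, shown only
# to illustrate the structure: overall value counts plus, under my_vars.CONDITIONAL, per-value
# class counts):
#
# toy = pd.DataFrame({"colour": ["red", "red", "blue"], "class": ["pos", "neg", "neg"]})
# create_svdm_lookup_column(toy, toy["colour"], "class")
# -> {"red": 2, "blue": 1,
#     my_vars.CONDITIONAL: {"red": Counter({"pos": 1, "neg": 1}), "blue": Counter({"neg": 1})}}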
def does_rule_cover_example(example, rule, dtypes):
"""
Tests if a rule covers a given example.
Parameters
----------
example: pd.Series - example
rule: pd.Series - rule
dtypes: pd.Series - data types of the respective columns in the dataset
Returns
-------
bool.
True if the rule covers the example else False.
"""
is_covered = True
for (col_name, example_val), dtype in zip(example.iteritems(), dtypes):
example_dtype = dtype
if col_name in rule:
# Cast object to tuple datatype -> this is only automatically done if it's not a string
rule_val = (rule[col_name])
# print("rule_val", rule_val, "\nrule type:", type(rule_val))
if is_string_dtype(example_dtype) and example_val != rule_val:
is_covered = False
break
elif is_numeric_dtype(example_dtype):
if rule_val[0] > example_val or rule_val[1] < example_val:
is_covered = False
break
return is_covered
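# Coverage illustration (hypothetical rule, example and the dtypes of the toy frame they would
# come from): every nominal feature must match exactly and every numeric value must fall
# inside the rule's Bounds.
#
# rule    = pd.Series({"A": Bounds(lower=1.0, upper=2.0), "B": "x", "class": "pos"})
# example = pd.Series({"A": 1.5, "B": "x", "class": "pos"})
# does_rule_cover_example(example, rule, df.dtypes)   -> True  (A inside [1.0, 2.0], B matches)
# with example["A"] = 3.0 instead, the call returns False      (A falls outside the Bounds)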
def does_rule_cover_example_without_label(example, rule, dtypes, class_col_name):
"""
Tests if a rule covers a given example. In contrast to does_rule_cover_example(), the class label is ignored.
Parameters
----------
example: pd.Series - example
rule: pd.Series - rule
dtypes: pd.Series - data types of the respective columns in the dataset
class_col_name: str - name of the column in <example> that holds the class label of the example
Returns
-------
bool.
True if the rule covers the example else False.
"""
is_covered = True
for (col_name, example_val), dtype in zip(example.iteritems(), dtypes):
example_dtype = dtype
if col_name in rule and col_name != class_col_name:
# Cast object to tuple datatype -> this is only automatically done if it's not a string
rule_val = (rule[col_name])
# print("rule_val", rule_val, "\nrule type:", type(rule_val))
if | is_string_dtype(example_dtype) | pandas.api.types.is_string_dtype |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
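# Toy illustration of the helper above (made-up groupers): a result that only contains the
# observed combination ("a", 1) is reindexed onto the full cartesian product of the groupers'
# categories, with unobserved rows filled by `fill_value`.
#
# cats = Categorical(["a"], categories=["a", "b"])
# res = DataFrame({"v": [1]}, index=MultiIndex.from_arrays([["a"], [1]], names=["c", "i"]))
# cartesian_product_for_groupers(res, [cats, [1, 2]], ["c", "i"], fill_value=0)
# -> ("a", 1) keeps 1; ("a", 2), ("b", 1) and ("b", 2) are filled with 0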
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = Categorical(["first", "second", "third", "fourth"], ordered=True)
df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = DataFrame(
{"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
# GH-9614
# test fix for when group by on None resulted in
# coercion of dtype categorical -> float
df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})
result = df.groupby("A").first()["B"]
expected = Series(
Categorical([], categories=["test", "train"]),
index=Series([], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
def test_sort():
# https://stackoverflow.com/questions/23814368/sorting-pandas-
# categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
res = df.groupby(["value_group"], observed=False)["value_group"].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame(
[
["(7.5, 10]", 10, 10],
["(7.5, 10]", 8, 20],
["(2.5, 5]", 5, 30],
["(5, 7.5]", 6, 40],
["(2.5, 5]", 4, 50],
["(0, 2.5]", 1, 60],
["(5, 7.5]", 7, 70],
],
columns=["range", "foo", "bar"],
)
df["range"] = Categorical(df["range"], ordered=True)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range", ordered=True
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
col = "range"
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
# when categories is ordered, group is ordered by category's order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
df["range"] = Categorical(df["range"], ordered=False)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range"
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
index = CategoricalIndex(
["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
categories=["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
name="range",
)
expected_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=["foo", "bar"]
)
col = "range"
# this is an unordered categorical, but we allow this ####
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
# use same data as test_groupby_sort_categorical, which category is
# corresponding to datetime.month
df = DataFrame(
{
"dt": [
datetime(2011, 7, 1),
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 2, 1),
datetime(2011, 1, 1),
datetime(2011, 5, 1),
],
"foo": [10, 8, 5, 6, 4, 1, 7],
"bar": [10, 20, 30, 40, 50, 60, 70],
},
columns=["dt", "foo", "bar"],
)
# ordered=True
df["dt"] = Categorical(df["dt"], ordered=True)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt", ordered=True)
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(
index, categories=index, name="dt", ordered=True
)
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
# when categories is ordered, group is ordered by category's order
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=False, observed=False).first()
)
# ordered = False
df["dt"] = Categorical(df["dt"], ordered=False)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt")
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(index, categories=index, name="dt")
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
tm.assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first()
)
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 0 by default
result = df.groupby("A", observed=False).B.sum()
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.sum(min_count=1)
expected = Series([3, 1, np.nan], expected_idx, name="B")
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
import os
import numpy as np
import pandas as pd
import requests
import json
import codecs
import matplotlib.pyplot as plt
from legcop import LegiScan
import nltk
import base64
from bs4 import BeautifulSoup
from collections import Counter
from matplotlib.ticker import FormatStrFormatter
import re
api_key = "<KEY>"
legis = LegiScan(api_key)
### create text documents
def import_text(num, path):
mylink2="https://api.legiscan.com/?key="+api_key+"&op=getBillText&id="+num
r = requests.get(mylink2)
json_response=r.json()
json_response
mydataframe=pd.DataFrame(json_response)
mynew=mydataframe['text']
mynew.to_frame()
base64_message = mynew['doc']
mydecoded = base64.b64decode(base64_message)
html = mydecoded
soup = BeautifulSoup(html, features="html.parser")
#base64_bytes = base64_message.encode('ascii')
#message_bytes = base64.b64decode(base64_bytes)
#message = message_bytes.decode('ascii')
raw = soup.get_text()
    output_file = open(os.path.join(path, 'Output_' + num + '.txt'), 'w', encoding="utf-8")
output_file.write(raw)
output_file.close()
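# Example call (the bill id and output folder are placeholders; real bill ids come from the
# LegiScan bill listings):
# import_text("1234567", r"C:\Users\me\Documents\legiscan_texts")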
# get the list of datasets available
def get_session_list(state):
    mylink2 = 'https://api.legiscan.com/?key=' + api_key + '&op=getDatasetList&state=' + state
r = requests.get(mylink2)
json_response=r.json()
json_response
global mylist
mylist= | pd.DataFrame.from_dict(json_response['datasetlist']) | pandas.DataFrame.from_dict |
import numpy as np
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
from pathlib import Path
def reject_outliers(data, m=2):
mean = np.mean(data)
std = np.std(data)
distance = abs(data - mean)
not_outlier = distance < m * std
return data[not_outlier]
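# For example, reject_outliers(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 100])) drops the 100,
# which lies more than two standard deviations (the default m=2) from the mean, and keeps
# the nine zeros.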
def plot_visuals(agent, scores, safety, loc="./results/"):
name = agent.__class__.__name__ + "#" + str(len(scores))
Path(loc).mkdir(parents=True, exist_ok=True)
sns.set_theme(style="darkgrid")
fig, axs = plt.subplots(ncols=2, figsize=(15, 5))
scores = reject_outliers(np.array(scores))
scoredf = pd.DataFrame(
[(i, v) for i, v in enumerate(scores)], columns=["episode", "score"]
)
sns.regplot(
x="episode",
y="score",
data=scoredf,
robust=True,
ci=None,
scatter_kws={"s": 10},
ax=axs[0],
)
sns.boxplot(scores, showfliers=False, ax=axs[1])
fig.savefig(loc + name + "-Scores.png", dpi=400)
fig, ax = plt.subplots(ncols=1)
risk_rates = []
for j in safety:
safe = j.count(1)
unsafe = j.count(0)
r = 0 if unsafe == 0 else (unsafe / (safe + unsafe))
risk_rates.append(r)
ax.plot(risk_rates)
plt.savefig(loc + name + "-Safety.png", dpi=400)
def plot_comparisson(data, episodes, loc="./results/"):
df = pd.concat([ | pd.DataFrame(d) | pandas.DataFrame |
# -*- coding: utf-8 -*-
'''
This code generates Fig. 1
Trend of global mean surface temperature and anthropogenic aerosol emissions
by <NAME> (<EMAIL>)
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import _env
import seaborn.apionly as sns
import matplotlib
from scipy import stats
from statsmodels.tsa.stattools import adfuller as ADF
from statsmodels.tsa.stattools import acf
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'Helvetica'
#import matplotlib
nens = _env.nens
parameters_info = _env.parameters_info
scenarios = _env.scenarios
par = 'TREFHT'
scen_aero = scenarios[1]
scen_base = scenarios[0]
if_temp = _env.odir_root + '/' + parameters_info[par]['dir']+ '/Global_Mean_' + par + '_1850-2019_ensembles' + '.xls'
if_temp_pi = _env.odir_root + '/' + parameters_info[par]['dir']+ '/Global_Mean_Temperature_pre-industrial_110yrs.xls'
odir_plot = _env.odir_root + '/plot/'
_env.mkdirs(odir_plot)
of_plot = odir_plot + 'F1.Trend_Temp_Emission.png'
itbl_temp = pd.read_excel(if_temp,index_col=0)
itbl_temp_pi = pd.read_excel(if_temp_pi,index_col=0)
itbl_temp = itbl_temp - float(itbl_temp_pi.mean()) #itbl_temp.iloc[0]#float(itbl_temp_pi.mean()) #273.15
#add results for differences in temperature between two scenarios
for ens in np.arange(1,nens+1):
itbl_temp['Diff%d' % ens] = itbl_temp[scen_base + '%d' % ens] - itbl_temp[scen_aero + '%d' % ens]
#statistics
itbls_temp_20_sam = {}
itbl_temp_20_avg = pd.DataFrame(index=itbl_temp.index,columns=[scen_base,scen_aero,'Diff'])
itbl_temp_20_ste = | pd.DataFrame(index=itbl_temp.index,columns=[scen_base,scen_aero,'Diff']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 25 21:48:04 2021
@author: raymondlei
"""
_version='20210515'
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, RepeatedKFold
from sklearn.metrics import roc_auc_score
import lightgbm as lgb
from pandas.api.types import is_numeric_dtype
import pickle
import os
def drop_high_corr(df,col,threshold=0.95):
num_f=[i for i in col if is_numeric_dtype(df[i])==True ]
corr_matrix = df.loc[:,num_f].corr().abs()
    upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
to_drop = set([column for column in upper.columns if any(upper[column] > threshold)])
print('dropped '+str(len(to_drop))+' highly correlated features out of '+str(len(col))+'!')
return to_drop
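# Toy example (made-up frame): "b" is a perfect linear copy of "a", so it exceeds the 0.95
# correlation threshold and is returned for dropping, while the weakly correlated "c" is kept.
#
# frame = pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 4, 6, 8], "c": [5, 1, 0, 2]})
# drop_high_corr(frame, ["a", "b", "c"])   -> {"b"}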
class vaccine_classifier:
'''
    A class designed specifically for this competition, covering everything from data loading to creating the submission file.
'''
def __init__ (self
,file_name
,submit
,top_k
,run_from_beginning
,show_n=20
,repeat=5
,nfold=5
,concervative=True
,drop_corr=True
,root='/Users/raymondlei/Downloads/project/'
,debug=True
):
self.root=root
self.file_name=file_name
self.submit=submit
self.show_n=show_n
self.repeat=repeat
self.nfold=nfold
self.concervative=concervative
self.drop_corr=drop_corr
self.top_k=top_k
self.run_from_beginning=run_from_beginning
self.cur_check_name=root+'checkpoint.p'
self.new_check_name=root+'checkpoint_sucess.p'
self.debug=debug
def parameter_qc(self):
'''
A QC runs prior everything to make sure all parameters are defined as expected
'''
assert isinstance(self.root, str)
assert isinstance(self.file_name, str)
assert isinstance(self.submit, bool)
assert isinstance(self.top_k, int)
assert isinstance(self.show_n, int)
assert isinstance(self.repeat, int)
assert isinstance(self.nfold, int)
assert isinstance(self.concervative, bool)
assert isinstance(self.drop_corr, bool)
assert isinstance(self.run_from_beginning, bool)
assert isinstance(self.debug, bool)
def checkpoint(self):
with open(self.cur_check_name, 'wb') as f:
pickle.dump(self, f)
#double check to make sure saving sucessfully
os.rename(self.cur_check_name ,self.new_check_name)
print('saved!')
def resume(self):
self = pickle.load( open( self.new_check_name, "rb" ) )
print('resumed from {}'.format(self.step_log[self.log_step]))
return self
def execute(self):
'''
Execute the pipeline in sequence with check point to save a state of each step
'''
self.step_log={
#1st step
0:'self.load(step=1)'
,1:'self.create_label(step=1)'
,2:'self.cast()'
,3:'self.split()'
,4:'self.create_combination()'
,5:'self.create_num_trans()'
,6:'self.create_index(step=1)'
,7:'self.create_agg()'
,8:'self.drop_unused()'
,9:'self.feature_selection(step=1)'
,10:'self.model_train(step=1)'
#2nd step
,11:'self.load(step=2)'
,12:'self.create_label(step=2)'
,13:'self.cast()'
,14:'self.split()' #it will be the same split as 1st step
#following step will be identical to 1st step, however, the feature created will be different because of using different data
,15:'self.create_combination()'
,16:'self.create_num_trans()'
,17:'self.create_index(step=2)'
,18:'self.create_agg()'
,19:'self.drop_unused()'
#train model for 2nd step
,20:'self.feature_selection(step=2)'
,21:'self.model_train(step=2)'
}
self.log_step=1
if self.run_from_beginning==False and os.path.exists(self.new_check_name):
if self.log_step<21:
self=self.resume()
while self.log_step<max(self.step_log.keys()):
exec(self.step_log[self.log_step])
self.log_step+=1
self.checkpoint()
else:
print("Last run is a complete end-to-end run. Nothing to rerun.")
else:
if os.path.exists(self.new_check_name):
os.remove(self.new_check_name)
for k, v in self.step_log.items():
exec(v)
if k<21:
self.log_step+=1
self.checkpoint()
else:
self.checkpoint()
print("This is the end of process!")
def load (self,step):
if self.debug:
self.train_x=pd.read_csv(self.root+'train_x.csv',index_col='respondent_id').sample(frac=0.1, random_state=1)
self.test_x=pd.read_csv(self.root+'test_x.csv').sample(frac=0.1, random_state=1)
else:
self.train_x=pd.read_csv(self.root+'train_x.csv',index_col='respondent_id')
self.test_x=pd.read_csv(self.root+'test_x.csv')
self.train_y=pd.read_csv(self.root+'train_y.csv',index_col='respondent_id')
self.train= | pd.merge(left=self.train_x, right=self.train_y,how='inner',left_index=True,right_index=True) | pandas.merge |
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: abnormal_detection_gaussian.py
@time: 2019-04-18 18:03
"""
import pandas as pd
from mayiutils.file_io.pickle_wrapper import PickleWrapper as picklew
from feature_selector import FeatureSelector
if __name__ == '__main__':
mode = 4
if mode == 4:
"""
feature selector
"""
# df1 = pd.read_excel('data.xlsx').iloc[:, 1:]
# print(df1.info())
df = pd.read_excel('/Users/luoyonggui/Documents/work/dataset/0/data.xlsx')
# print(df.info())# ๆฅ็dfๅญๆฎตๅ็ผบๅคฑๅผไฟกๆฏ
label = df['็่ต็ป่ฎบ']
df = df.drop(columns=['็่ต็ป่ฎบ'])
fs = FeatureSelector(data=df, labels=label)
# ็ผบๅคฑๅผๅค็
fs.identify_missing(missing_threshold=0.6)
if mode == 3:
"""
ๅๅนถๅไฟไบบๅบๆฌไฟกๆฏ
"""
df1 = pd.read_excel('data.xlsx', 'Sheet2').dropna(axis=1, how='all')
# print(df1.info())
"""
ๅฝๅนถๅฎขๆทๅท 528 non-null int64
ๆงๅซ 528 non-null object
ๅบ็ๅนดๆๆฅ 528 non-null datetime64[ns]
ๅฉๅงป็ถๅต 432 non-null object
่ไธ 484 non-null float64
่ไธๅฑ้ฉ็ญ็บง 484 non-null float64
        ๅนดๆถๅฅ 528 non-null int64
ๅนดไบคไฟ่ดน 528 non-null float64
็น่ฑซๆๆคๅๆฌกๆฐ 528 non-null int64
ๆขๅพ็่ตๆฌกๆฐ 528 non-null int64
ๆขๅพๆไฟๆฌกๆฐ 528 non-null int64
ๆขๅพๅปถๆๆฟไฟๆฌกๆฐ 528 non-null int64
้ๆ ๅไฝๆฟไฟๆฌกๆฐ 528 non-null int64
ๆขๅพ่ฐๆฅๆ ่ฏ 528 non-null object
ๆขๅพไฝๆฃๆ ่ฏ 528 non-null object
็ดฏ็งฏๅฏฟ้ฉๅ้ฃ้ฉไฟ้ข 528 non-null float64
็ดฏ็งฏ้็พๅ้ฃ้ฉไฟ้ข 528 non-null float64
        ๆไฟไบบๅนดๆถๅฅไธๅนดไบคไฟ่ดนๆฏๅผ 437 non-null float64
่ขซไฟ้ฉไบบๆๆ้็พ้ฒ็้ฉไฟๅไปถๆฐ 528 non-null int64
่ขซไฟ้ฉไบบๆๆ็ญๆๆๅค้ฉไฟๅไปถๆฐ 528 non-null int64
่ขซไฟ้ฉไบบๆๆ็ญๆๅฅๅบท้ฉไฟๅไปถๆฐ 528 non-null int64
        ่ขซไฟ้ฉไบบ90ๅคฉๅ็ๆไฟๅไปถๆฐ 528 non-null int64
        ่ขซไฟ้ฉไบบ180ๅคฉๅ็ๆไฟๅไปถๆฐ 528 non-null int64
        ่ขซไฟ้ฉไบบ365ๅคฉๅ็ๆไฟๅไปถๆฐ 528 non-null int64
        ่ขซไฟ้ฉไบบ730ๅคฉๅ็ๆไฟๅไปถๆฐ 528 non-null int64
ๅฎขๆท้ปๅๅๆ ่ฏ 528 non-null object
ไฟๅๅคฑๆๆฅๆ 11 non-null datetime64[ns]
ไฟๅๅคๆๆฅๆ 7 non-null datetime64[ns]
ๅ็ไบบๅๆดๆฅๆ 12 non-null datetime64[ns]
"""
cols = list(df1.columns)
cols.remove('ไฟๅๅคฑๆๆฅๆ')
cols.remove('ไฟๅๅคๆๆฅๆ')
cols.remove('ๅ็ไบบๅๆดๆฅๆ')
cols.remove('ๅฎขๆท้ปๅๅๆ ่ฏ')#ๅชๆไธไธชๅผ
df1['ๅบ็ๅนด'] = df1['ๅบ็ๅนดๆๆฅ'].apply(lambda x: int(str(x)[:4]))
cols.append('ๅบ็ๅนด')
cols.remove('ๅบ็ๅนดๆๆฅ')
t = pd.get_dummies(df1['ๅฉๅงป็ถๅต'], prefix='ๅฉๅงป็ถๅต')
df2 = pd.concat([df1, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('ๅฉๅงป็ถๅต')
t = pd.get_dummies(df2['ๆงๅซ'], prefix='ๆงๅซ')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('ๆงๅซ')
t = pd.get_dummies(df2['ๆขๅพ่ฐๆฅๆ ่ฏ'], prefix='ๆขๅพ่ฐๆฅๆ ่ฏ')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('ๆขๅพ่ฐๆฅๆ ่ฏ')
t = pd.get_dummies(df2['ๆขๅพไฝๆฃๆ ่ฏ'], prefix='ๆขๅพไฝๆฃๆ ่ฏ')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('ๆขๅพไฝๆฃๆ ่ฏ')
# print(df2['่ไธ'].value_counts())
"""
ๅๅๅไฝ ๅ็ฑป
"""
df2['่ไธ'] = df2['่ไธ'].apply(lambda x: str(x)[:1])
# print(df2['่ไธ'].value_counts())
t = pd.get_dummies(df2['่ไธ'], prefix='่ไธ')
df2 = | pd.concat([df2, t], axis=1) | pandas.concat |
# pylint: disable=broad-except
# Imports: standard library
import os
import math
import time
import shutil
import logging
import multiprocessing
from typing import Dict, List
# Imports: third party
import pandas as pd
# Imports: first party
from definitions.icu import ICU_SCALE_UNITS
from tensorize.utils import save_mrns_and_csns_csv
from tensorize.bedmaster.readers import BedmasterReader, CrossReferencer
from tensorize.bedmaster.writers import Writer
from tensorize.bedmaster.match_patient_bedmaster import PatientBedmasterMatcher
class Tensorizer:
"""
Main class to tensorize the Bedmaster data.
"""
def __init__(
self,
bedmaster: str,
xref: str,
adt: str,
):
"""
Initialize Tensorizer object.
:param bedmaster: <str> Directory containing all the Bedmaster data.
:param xref: <str> Full path of the file containing
cross reference between EDW and Bedmaster.
:param adt: <str> Full path of the file containing
the adt patients' info to be tensorized.
"""
self.bedmaster = bedmaster
self.xref = xref
self.adt = adt
self.untensorized_files: Dict[str, List[str]] = {"file": [], "error": []}
def tensorize(
self,
tensors: str,
mrns: List[str] = None,
starting_time: int = None,
ending_time: int = None,
overwrite_hd5: bool = True,
n_patients: int = None,
num_workers: int = None,
):
"""
Tensorizes Bedmaster data.
It will create a new HD5 for each MRN with the integrated data
from both sources according to the following structure:
<BedMaster>
<visitID>/
<signal name>/
data and metadata
...
...
:param tensors: <str> Directory where the output HD5 will be saved.
:param mrns: <List[str]> MGH MRNs. The rest will be filtered out.
If None, all the MRN are taken
:param starting_time: <int> Start time in Unix format.
If None, timestamps will be taken from the first one.
:param ending_time: <int> End time in Unix format.
If None, timestamps will be taken until the last one.
:param overwrite_hd5: <bool> Overwrite existing HD5 files during tensorization
should be overwritten.
:param n_patients: <int> Max number of patients to tensorize.
:param num_workers: <int> Number of cores used to parallelize tensorization
"""
# No options specified: get all the cross-referenced files
files_per_mrn = CrossReferencer(
self.bedmaster,
self.xref,
self.adt,
).get_xref_files(
mrns=mrns,
starting_time=starting_time,
ending_time=ending_time,
overwrite_hd5=overwrite_hd5,
n_patients=n_patients,
tensors=tensors,
)
os.makedirs(tensors, exist_ok=True)
with multiprocessing.Pool(processes=num_workers) as pool:
pool.starmap(
self._main_write,
[
(tensors, mrn, visits, ICU_SCALE_UNITS)
for mrn, visits in files_per_mrn.items()
],
)
df = pd.DataFrame.from_dict(self.untensorized_files)
df.to_csv(
os.path.join(tensors, "untensorized_bedmaster_files.csv"),
index=False,
)
def _main_write(
self,
tensors: str,
mrn: str,
visits: Dict,
scaling_and_units: Dict,
):
try:
# Open the writer: one file per MRN
output_file = os.path.join(tensors, f"{mrn}.hd5")
with Writer(output_file) as writer:
writer.write_completed_flag(False)
for visit_id, bedmaster_files in visits.items():
# Set the visit ID
writer.set_visit_id(visit_id)
# Write Bedmaster data
all_files, untensorized_files = self._write_bedmaster_data(
bedmaster_files,
writer=writer,
scaling_and_units=scaling_and_units,
)
self.untensorized_files["file"].append(untensorized_files["file"])
self.untensorized_files["error"].append(untensorized_files["error"])
writer.write_completed_flag(all_files)
logging.info(
f"Tensorized data from MRN {mrn}, CSN {visit_id} into "
f"{output_file}.",
)
except Exception as error:
logging.exception(f"Tensorization failed for MRN {mrn} with CSNs {visits}")
raise error
@staticmethod
def _write_bedmaster_data(
bedmaster_files: List[str],
writer: Writer,
scaling_and_units: Dict,
):
all_files = True
previous_max = None
untensorized_files: Dict[str, List[str]] = {"file": [], "error": []}
for bedmaster_file in bedmaster_files:
try:
with BedmasterReader(bedmaster_file, scaling_and_units) as reader:
if previous_max:
reader.get_interbundle_correction(previous_max)
# These blocks can be easily parallelized with MPI:
# >>> rank = MPI.COMM_WORLD.rank
# >>> if rank == 1:
vs_signals = reader.list_vs()
for vs_signal_name in vs_signals:
vs_signal = reader.get_vs(vs_signal_name)
if vs_signal:
writer.write_signal(vs_signal)
# >>> if rank == 2
wv_signals = reader.list_wv()
for wv_signal_name, channel in wv_signals.items():
wv_signal = reader.get_wv(channel, wv_signal_name)
if wv_signal:
writer.write_signal(wv_signal)
previous_max = reader.max_segment
except Exception as error:
untensorized_files["file"].append(bedmaster_file)
untensorized_files["error"].append(repr(error))
if len(untensorized_files["file"]) > 0:
all_files = False
return all_files, untensorized_files
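# A sketch of how a tensorized per-MRN HD5 could be inspected afterwards (assumes h5py is
# installed; the path and group names are illustrative and simply follow the
# <BedMaster>/<visitID>/<signal name> layout described in Tensorizer.tensorize):
#
# import h5py
# with h5py.File("/path/to/tensors/12345678.hd5", "r") as hd5:
#     for top_group in hd5.values():              # e.g. the Bedmaster group
#         for visit_id, visit in top_group.items():
#             print(visit_id, list(visit))         # signal names stored for that visit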
def create_folders(staging_dir: str):
"""
Create temp folders for tensorization.
:param staging_dir: <str> Path to temporary local directory.
"""
os.makedirs(staging_dir, exist_ok=True)
os.makedirs(os.path.join(staging_dir, "bedmaster_temp"), exist_ok=True)
def stage_bedmaster_files(
staging_dir: str,
adt: str,
xref: str,
):
"""
Find Bedmaster files and copy them to local folder.
:param staging_dir: <str> Path to temporary staging directory.
:param adt: <str> Path to CSN containing ADT table.
:param xref: <str> Path to xref.csv with Bedmaster metadata.
"""
path_patients = os.path.join(staging_dir, "patients.csv")
mrns_and_csns = pd.read_csv(path_patients)
mrns = mrns_and_csns["MRN"].drop_duplicates()
# Copy ADT table
adt_df = pd.read_csv(adt)
adt_subset = adt_df[adt_df["MRN"].isin(mrns)]
path_adt_new = os.path.join(staging_dir, "bedmaster_temp", "adt.csv")
adt_subset.to_csv(path_adt_new, index=False)
# Copy xref table
xref_df = pd.read_csv(xref).sort_values(by=["MRN"], ascending=True)
xref_subset = xref_df[xref_df["MRN"].isin(mrns)]
path_xref_new = os.path.join(staging_dir, "bedmaster_temp", "xref.csv")
xref_subset.to_csv(path_xref_new, index=False)
# Iterate over all Bedmaster file paths to copy to staging directory
path_destination_dir = os.path.join(staging_dir, "bedmaster_temp")
for path_source_file in xref_subset["path"]:
if os.path.exists(path_source_file):
try:
shutil.copy(path_source_file, path_destination_dir)
except FileNotFoundError as e:
logging.warning(f"{path_source_file} not found. Error given: {e}")
else:
logging.warning(f"{path_source_file} not found.")
def cleanup_staging_files(staging_dir: str):
"""
Remove temp folders used for tensorization.
:param staging_dir: <str> Path to temporary local directory.
"""
shutil.rmtree(os.path.join(staging_dir, "bedmaster_temp"))
shutil.rmtree(os.path.join(staging_dir, "results_temp"))
os.remove(os.path.join(staging_dir, "patients.csv"))
def copy_hd5(staging_dir: str, destination_tensors: str, num_workers: int):
"""
Copy tensorized files to MAD3.
:param staging_dir: <str> Path to temporary local directory.
:param destination_tensors: <str> Path to MAD3 directory.
:param num_workers: <int> Number of workers to use.
"""
init_time = time.time()
list_files = os.listdir(staging_dir)
with multiprocessing.Pool(processes=num_workers) as pool:
pool.starmap(
_copy_hd5,
[(staging_dir, destination_tensors, file) for file in list_files],
)
elapsed_time = time.time() - init_time
logging.info(
f"HD5 files copied to {destination_tensors}. "
f"Process took {elapsed_time:.2f} sec",
)
def _copy_hd5(staging_dir, destination_dir, file):
source_path = os.path.join(staging_dir, file)
shutil.copy(source_path, destination_dir)
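# Example usage (illustrative; the paths below are hypothetical): copy every
# file at the top level of the staging directory to the destination with a
# pool of 4 worker processes.
# >>> copy_hd5("/tmp/staging", "/mad3/tensors", num_workers=4)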
def tensorize(args):
# Cross reference ADT table against Bedmaster metadata;
# this results in the creation of xref.csv
if not os.path.isfile(args.xref):
matcher = PatientBedmasterMatcher(
bedmaster=args.bedmaster,
adt=args.adt,
)
matcher.match_files(xref=args.xref)
# Iterate over batch of patients
missed_patients = []
num_mrns_tensorized = []
# If user does not set the end index,
if args.mrn_end_index is None:
adt_df = | pd.read_csv(args.adt) | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from dask.dataframe.hashing import hash_pandas_object
from dask.dataframe.utils import assert_eq
@pytest.mark.parametrize('obj', [
pd.Series([1, 2, 3]),
pd.Series([1.0, 1.5, 3.2]),
pd.Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
pd.Series(['a', 'b', 'c']),
| pd.Series([True, False, True]) | pandas.Series |
import warnings
from functools import reduce
import os
import json
import numpy as np
import pandas as pd
from tqdm import tqdm
from qualipy.project import Project
from qualipy.util import set_value_type, set_metric_id
from qualipy.anomaly._isolation_forest import IsolationForestModel
from qualipy.anomaly._prophet import ProphetModel
from qualipy.anomaly._std import STDCheck
from qualipy.anomaly.base import LoadedModel
from qualipy.anomaly.trend_rules import trend_rules
anomaly_columns = [
"column_name",
"date",
"metric",
"arguments",
"return_format",
"value",
"severity",
"batch_name",
"insert_time",
"trend_function_name",
]
MODS = {
"IsolationForest": IsolationForestModel,
"prophet": ProphetModel,
"std": STDCheck,
}
class GenerateAnomalies:
def __init__(self, project_name, config_dir):
self.config_dir = config_dir
with open(os.path.join(config_dir, "config.json"), "r") as conf_file:
config = json.load(conf_file)
self.model_type = config[project_name].get("ANOMALY_MODEL", "std")
self.anom_args = config[project_name].get("ANOMALY_ARGS", {})
self.specific = self.anom_args.pop("specific", {})
self.project_name = project_name
self.project = Project(project_name, config_dir=config_dir, re_init=True)
df = self.project.get_project_table()
df["floored_datetime"] = df.date.dt.floor("T")
df = (
df.groupby("floored_datetime", as_index=False)
.apply(lambda g: g[g.insert_time == g.insert_time.max()])
.reset_index(drop=True)
)
df = df.drop("floored_datetime", axis=1)
df.column_name = df.column_name + "_" + df.run_name
df["metric_name"] = (
df.column_name
+ "_"
+ df.metric.astype(str)
+ "_"
+ np.where(df.arguments.isnull(), "", df.arguments)
)
df = set_metric_id(df)
df = df.sort_values("date")
self.df = df
def _num_train_and_save(self, data, all_rows, metric_name):
try:
metric_id = data.metric_id.iloc[0]
mod = MODS[self.model_type](
config_dir=self.config_dir,
metric_name=metric_id,
project_name=self.project_name,
)
mod.fit(data)
mod.save()
preds = mod.predict(data)
if isinstance(preds, tuple):
severity = preds[1]
preds = preds[0]
outlier_rows = data[preds == -1].copy()
outlier_rows["severity"] = severity[preds == -1]
else:
outlier_rows = data[preds == -1]
outlier_rows["severity"] = np.NaN
if outlier_rows.shape[0] > 0:
all_rows.append(outlier_rows)
except Exception as e:
print(str(e))
warnings.warn(f"Unable to create anomaly model for {metric_name}")
return all_rows
def _num_from_loaded_model(self, data, all_rows):
mod = LoadedModel(config_dir=self.config_dir)
mod.load(data.metric_id.iloc[0])
preds = mod.predict(data)
if isinstance(preds, tuple):
severity = preds[1]
preds = preds[0]
outlier_rows = data[preds == -1].copy()
outlier_rows["severity"] = severity[preds == -1]
else:
outlier_rows = data[preds == -1]
outlier_rows["severity"] = np.NaN
if outlier_rows.shape[0] > 0:
all_rows.append(outlier_rows)
return all_rows
def create_anom_num_table(self, retrain=False):
df = self.df.copy()
df = df[
(df["type"] == "numerical")
| (df["column_name"].isin(["rows", "columns"]))
| (df["metric"].isin(["perc_missing", "count"]))
]
df.value = df.value.astype(float)
all_rows = []
if self.model_type != "ignore":
for metric_name, data in tqdm(df.groupby("metric_name")):
if not retrain:
try:
all_rows = self._num_from_loaded_model(data, all_rows)
except ValueError:
warnings.warn(f"Unable to load anomaly model for {metric_name}")
except FileNotFoundError:
all_rows = self._num_train_and_save(data, all_rows, metric_name)
else:
all_rows = self._num_train_and_save(data, all_rows, metric_name)
try:
data = pd.concat(all_rows).sort_values("date", ascending=False)
data["trend_function_name"] = np.NaN
data = data[anomaly_columns]
data.value = data.value.astype(str)
except:
data = pd.DataFrame([], columns=anomaly_columns)
return data
def create_anom_cat_table(self, retrain=False):
df = self.df
df = df[df["type"] == "categorical"]
all_rows = []
if self.model_type != "ignore":
for metric_id, data in tqdm(df.groupby("metric_id")):
data = set_value_type(data.copy())
try:
data_values = [
(pd.Series(c) / pd.Series(c).sum()).to_dict()
for c in data["value"]
]
unique_vals = reduce(
lambda x, y: x.union(y), [set(i.keys()) for i in data_values]
)
non_diff_lines = []
potential_lines = []
for cat in unique_vals:
values = pd.Series([i.get(cat, 0) for i in data_values])
running_means = values.rolling(window=5).mean()
differences = values - running_means
sum_abs = np.abs(differences).sum()
potential_lines.append((cat, differences, sum_abs))
non_diff_lines.append((cat, values))
potential_lines = sorted(
potential_lines, key=lambda v: v[2], reverse=True
)
diffs_df = pd.DataFrame({i[0]: i[1] for i in potential_lines})
diffs_df["sum_of_changes"] = diffs_df.abs().sum(axis=1)
all_non_diff_lines = pd.DataFrame(
{i[0]: i[1] for i in non_diff_lines}
)
for col in all_non_diff_lines.columns:
mean = all_non_diff_lines[col].mean()
std = all_non_diff_lines[col].std()
if std > 0.05:
all_non_diff_lines[f"{col}_below"] = np.where(
all_non_diff_lines[col] < (mean - (4 * std)), 1, 0
)
all_non_diff_lines[f"{col}_above"] = np.where(
all_non_diff_lines[col] > (mean + (4 * std)), 1, 0
)
else:
all_non_diff_lines[f"{col}_below"] = 0
all_non_diff_lines[f"{col}_above"] = 0
std_sums = all_non_diff_lines[
[
col
for col in all_non_diff_lines.columns
if "_below" in str(col) or "_above" in str(col)
]
].sum(axis=1)
mod = IsolationForestModel(
config_dir=self.config_dir,
metric_name=metric_id,
arguments={
"contamination": 0.01,
"n_estimators": 50,
"multivariate": True,
"check_for_std": True,
},
)
outliers = mod.train_predict(all_non_diff_lines)
all_non_diff_lines["iso_outlier"] = outliers
data["severity"] = diffs_df.sum_of_changes.values
sample_size = data.value.apply(lambda v: sum(v.values()))
outlier_rows = data[
(outliers == -1) & (std_sums.values > 0) & (sample_size > 10)
]
if outlier_rows.shape[0] > 0:
all_rows.append(outlier_rows)
except ValueError:
pass
try:
data = pd.concat(all_rows).sort_values("date", ascending=False)
data["trend_function_name"] = np.NaN
data = data[anomaly_columns]
data.value = data.value.astype(str)
except:
data = pd.DataFrame([], columns=anomaly_columns)
return data
def create_error_check_table(self):
# obv only need to do this once
df = self.df
df = df[df["type"] == "boolean"]
if df.shape[0] > 0:
df = set_value_type(df)
df = df[~df.value]
df["severity"] = np.NaN
df["trend_function_name"] = np.NaN
df = df[anomaly_columns]
else:
df = pd.DataFrame([], columns=anomaly_columns)
return df
def create_trend_rule_table(self):
# obv only need to do this once
df = self.df
if len(self.specific) > 0:
all_rows = []
df = df[df.metric_id.isin(self.specific)]
for metric_id, group in df.groupby("metric_id"):
trend_functions = self.specific[metric_id]
group = set_value_type(group)
for fun, items in trend_functions.items():
outlier_data = trend_rules[fun]["function"](group.copy())
if outlier_data.shape[0] > 0:
outlier_data["severity"] = items.get("severity", np.NaN)
outlier_data["trend_function_name"] = fun
all_rows.append(outlier_data)
if len(all_rows) > 0:
data = | pd.concat(all_rows) | pandas.concat |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = | pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6']) | pandas.Series |
import os
import cv2
import torch
import pydicom
import numpy as np
import pandas as pd
from PIL import Image
from torch.utils.data import Dataset
from albumentations import ShiftScaleRotate, Normalize, Resize, Compose
from albumentations.pytorch import ToTensor
from gloria.constants import *
class ImageBaseDataset(Dataset):
def __init__(
self,
cfg,
split="train",
transform=None,
):
self.cfg = cfg
self.transform = transform
self.split = split
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def read_from_jpg(self, img_path):
x = cv2.imread(str(img_path), 0)
# transform images
x = self._resize_img(x, self.cfg.data.image.imsize)
img = Image.fromarray(x).convert("RGB")
if self.transform is not None:
img = self.transform(img)
return img
def read_from_dicom(self, img_path):
raise NotImplementedError
def _resize_img(self, img, scale):
"""
Args:
img - image as numpy array (cv2)
scale - desired output image-size as scale x scale
Return:
image resized to scale x scale with shortest dimension 0-padded
"""
size = img.shape
max_dim = max(size)
max_ind = size.index(max_dim)
# Resizing
if max_ind == 0:
# image is taller
wpercent = scale / float(size[0])
hsize = int((float(size[1]) * float(wpercent)))
desireable_size = (scale, hsize)
else:
# image is wider
hpercent = scale / float(size[1])
wsize = int((float(size[0]) * float(hpercent)))
desireable_size = (wsize, scale)
resized_img = cv2.resize(
img, desireable_size[::-1], interpolation=cv2.INTER_AREA
) # this flips the desireable_size vector
# Padding
if max_ind == 0:
# height fixed at scale, pad the width
pad_size = scale - resized_img.shape[1]
left = int(np.floor(pad_size / 2))
right = int(np.ceil(pad_size / 2))
top = int(0)
bottom = int(0)
else:
# width fixed at scale, pad the height
pad_size = scale - resized_img.shape[0]
top = int(np.floor(pad_size / 2))
bottom = int(np.ceil(pad_size / 2))
left = int(0)
right = int(0)
resized_img = np.pad(
resized_img, [(top, bottom), (left, right)], "constant", constant_values=0
)
return resized_img
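# Worked example for _resize_img (illustrative): a 100x50 grayscale input with
# scale=224 is resized to 224x112 (the taller dimension is fixed at the scale)
# and then zero-padded by 56 pixels on the left and right, giving 224x224.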
class CheXpertImageDataset(ImageBaseDataset):
def __init__(self, cfg, split="train", transform=None, img_type="Frontal"):
if CHEXPERT_DATA_DIR is None:
raise RuntimeError(
"CheXpert data path empty\n"
+ "Make sure to download data from:\n"
+ " https://stanfordmlgroup.github.io/competitions/chexpert/"
+ f" and update CHEXPERT_DATA_DIR in ./gloria/constants.py"
)
self.cfg = cfg
# read in csv file
if split == "train":
self.df = pd.read_csv(CHEXPERT_TRAIN_CSV)
elif split == "valid":
self.df = pd.read_csv(CHEXPERT_VALID_CSV)
else:
self.df = | pd.read_csv(CHEXPERT_TEST_CSV) | pandas.read_csv |
"""
Script to get the distribution of defined percentage of most important ingredients according to their categories.
Needs a working MongoDB server containing the Open Food Facts database.
"""
import pandas as pd
import pymongo
import tqdm
from data import INGREDIENTS_DISTRIBUTION_FILEPATH
# Minimum number of defined percentage per ingredient
MIN_VALUE_NB = 30
df = | pd.DataFrame(columns=['id', 'percent', 'categories_tags']) | pandas.DataFrame |
from concurrent import futures
import time
import grpc
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d:%H:%M:%S', level=logging.DEBUG)
import temperature_bands_pb2
import temperature_bands_pb2_grpc
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# getting the utils file here
import os, sys
import xbos_services_utils3 as utils
import datetime
import pytz
import numpy as np
import pandas as pd
import yaml
import traceback
from pathlib import Path
DAYS_IN_WEEK = 7
TEMPERATURE_BANDS_DATA_PATH = Path(os.environ["TEMPERATURE_BANDS_DATA_PATH"])
TEMPERATURE_BANDS_HOST_ADDRESS = os.environ["TEMPERATURE_BANDS_HOST_ADDRESS"]
def _get_temperature_band_config(building, zone):
band_path = str(TEMPERATURE_BANDS_DATA_PATH / building / (zone + ".yml"))
if os.path.exists(band_path):
with open(band_path, "r") as f:
try:
config = yaml.load(f)
except yaml.YAMLError:
return None, "yaml could not read file at: %s" % band_path
else:
return None, "consumption file could not be found. path: %s." % band_path
return config, None
def _get_week_comfortband(building, zone, date, interval):
"""
Gets the whole comfortband from the zone configuration file and correctly resamples the data according to the interval.
:param date: The date for which we want to start the week. Timezone aware.
:param interval: int:seconds. The interval/frequency of resampling. Has to be such that 60 % interval == 0
:return: pd.df (col = "t_low", "t_high") with a timezone-aware time-series index for the date provided, in the timezone of the input data.
"""
config, err = _get_temperature_band_config(building, zone)
if config is None:
return None, err
# Set the date to the controller timezone.
building_date = date.astimezone(tz=pytz.timezone(config["tz"]))
weekday = building_date.weekday()
list_data = []
comfortband_data = config["comfortband"]
df_do_not_exceed, err = _get_week_do_not_exceed(building, zone, building_date, interval)
if df_do_not_exceed is None:
return None, err
# Note, we need to get a day before the start and after the end of the week to correctly resample due to timezones.
for i in range(DAYS_IN_WEEK + 2):
curr_weekday = (weekday + i - 1) % DAYS_IN_WEEK
curr_day = building_date + datetime.timedelta(days=i - 1)
curr_idx = []
curr_comfortband = []
weekday_comfortband = np.array(comfortband_data[curr_weekday])
for interval_comfortband in weekday_comfortband:
start, end, t_low, t_high = interval_comfortband
start = utils.combine_date_time(start, curr_day)
if t_low is None or t_low == "None":
interval_safety = df_do_not_exceed[start-datetime.timedelta(seconds=interval):start]
t_low = interval_safety["t_low"].mean() # TODO We want the mean weighted by duration. Fine approximation for now
if t_high is None or t_high == "None":
interval_safety = df_do_not_exceed[start-datetime.timedelta(seconds=interval):start]
t_high = interval_safety["t_high"].mean()
curr_idx.append(start)
curr_comfortband.append({"t_low": float(t_low),
"t_high": float(t_high)})
list_data.append(pd.DataFrame(index=curr_idx, data=curr_comfortband))
df_comfortband = | pd.concat(list_data) | pandas.concat |
#!/usr/bin/python
#Configuration items
import numpy as np
import pandas as pd
######################
#Character Sheet Items
class Ab_Sco:
def __init__(self, Str, Dex, Con, Int, Wis, Cha):
self.Str = Str
self.Dex = Dex
self.Con = Con
self.Int = Int
self.Wis = Wis
self.Cha = Cha
Ability_Scores = Ab_Sco(0, 0, 0, 0, 0, 0)
Size = ""
Level = 1
Class = ""
Race = ""
class Background:
def __init__(self, trait, ideal, bond, flaw):
self.trait = trait
self.ideal = ideal
self.bond = bond
self.flaw = flaw
bg = Background("", "", "", "")
Alignment = ""
Exp = 0
Prof_Bonus = 0
#Format: [IsProficient, Score]
SaveThrow = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
#Format: [Name, IsProficient, Score]
Skills = [["Acrobatics", 0, 0],["Animal Handling", 0, 0],["Arcana", 0, 0],["Athletics", 0, 0],["Deception", 0, 0],["History", 0, 0],["Insight", 0, 0],["Intimidation", 0, 0],["Investigation", 0, 0],["Medicine", 0, 0],["Nature", 0, 0],["Perception", 0, 0],["Performance", 0, 0],["Persuasion", 0, 0],["Religion", 0, 0],["Sleight of Hand", 0, 0],["Stealth", 0, 0],["Survival", 0, 0]]
#Passive_Percept = 0 #Passive Perception is just 10 + perception mod --> can be calculated on the fly
AC = 0
Initiative = 0
Speed = 0
Max_HP = 0 #No need to calculate current/temp --> generating characters default to max hp
Hit_Die_Type = ""
Hit_Die_Num = 0
Equipment = pd.DataFrame(columns=['Name'])
class Currency:
def __init__(self, cp, sp, ep, gp, pp):
self.cp = cp
self.sp = sp
self.ep = ep
self.gp = gp
self.pp = pp
Money = Currency(0, 0, 0, 0, 0)
class Proficiencies:
def __init__(self, armor, weapon, language, tool):
self.armor = armor
self.weapon = weapon
self.language = language
self.tool = tool
Prof = Proficiencies([], [], [], [])
Features = pd.DataFrame(columns=['Name', 'Desc'])
Spells0 = pd.DataFrame(columns=['Prep', 'Name'])
Spells1 = pd.DataFrame(columns=['Prep', 'Name'])
Spells2 = pd.DataFrame(columns=['Prep', 'Name'])
Spells3 = pd.DataFrame(columns=['Prep', 'Name'])
Spells4 = pd.DataFrame(columns=['Prep', 'Name'])
Spells5 = pd.DataFrame(columns=['Prep', 'Name'])
Spells6 = pd.DataFrame(columns=['Prep', 'Name'])
Spells7 = pd.DataFrame(columns=['Prep', 'Name'])
Spells8 = | pd.DataFrame(columns=['Prep', 'Name']) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
def getcount(x):
return x.count()
def getset(x):
return len(set(x))
def FeatSection(data,label):
'Features computed over the feature window'
'Count and distinct number of coupons received per user'
uc_cnt_set = pd.pivot_table(data,index='user_id',values='coupon_id',aggfunc=[getcount,getset]).reset_index()
uc_cnt_set.columns = ['user_id','uc_cnt','uc_set']
label = pd.merge(label,uc_cnt_set,on='user_id',how='left')
usecp = data[data['date'].isnull() == False] # records where the coupon was redeemed
dropcp = data[data['date'].isnull() == True] # records where the coupon was not redeemed
'Count and distinct number of coupons redeemed per user'
uusec_cnt_set = pd.pivot_table(usecp,index='user_id',values='coupon_id',aggfunc=[getcount,getset]).reset_index()
uusec_cnt_set.columns = ['user_id','uusec_cnt','uusec_set']
label = pd.merge(label,uusec_cnt_set,on='user_id',how='left')
'Distinct count of merchants from which the user received coupons'
um_set = | pd.pivot_table(data,index='user_id',values='merchant_id',aggfunc=getset) | pandas.pivot_table |
import pandas as pd
import os
def prepare_legends(mean_models, models, interpretability_name):
bars = []
y_pos = []
index_bars = 0
for nb, i in enumerate(mean_models):
if nb % len(models) == int(len(models)/2):
bars.append(interpretability_name[index_bars])
index_bars += 1
else:
bars.append('')
if nb < len(mean_models)/len(interpretability_name):
y_pos.append(nb)
elif nb < 2*len(mean_models)/len(interpretability_name):
y_pos.append(nb+1)
elif nb < 3*len(mean_models)/len(interpretability_name):
y_pos.append(nb+2)
elif nb < 4*len(mean_models)/len(interpretability_name):
y_pos.append(nb+3)
elif nb < 5*len(mean_models)/len(interpretability_name):
y_pos.append(nb+4)
else:
y_pos.append(nb+5)
colors = ['black', 'red', 'green', 'blue', 'yellow', 'grey', 'purple', 'cyan', 'gold', 'brown']
color= []
for nb, model in enumerate(models):
color.append(colors[nb])
return color, bars, y_pos
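# Example (illustrative; model and method names are hypothetical): every
# len(models) bars share one centred label and a gap is left between method
# groups on the y axis.
# >>> prepare_legends([0.1, 0.2, 0.3, 0.4], ["model_a", "model_b"], ["LIME", "SHAP"])
# (['black', 'red'], ['', 'LIME', '', 'SHAP'], [0, 1, 3, 4])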
class store_experimental_informations(object):
"""
Class to store the experimental results of precision, coverage and F1 score for graph representation
"""
def __init__(self, len_models, len_interpretability_name, columns_name_file1, nb_models, columns_name_file2=None, columns_name_file3=None, columns_multimodal=None):
"""
Initialize all the variable that will be used to store experimental results
Args: len_models: Number of black box models that we are explaining during experiments
len_interpretability_name: Number of explanation methods used to explain each model
interpretability_name: List of the name of the explanation methods used to explain each model
"""
columns_name_file2 = columns_name_file1 if columns_name_file2 is None else columns_name_file2
columns_name_file3 = columns_name_file1 if columns_name_file3 is None else columns_name_file3
columns_name_file4 = columns_name_file1
self.multimodal_columns = ["LS", "LSe log", "LSe lin", "Anchors", 'APE SI', 'APE CF', 'APE FOLD', 'APE FULL', 'APE FULL pvalue', "DT", "Multimodal",
"radius", "fr pvalue", "cf pvalue", "separability", "fr fold",
"cf fold", "SI bon", "CF bon", "fold bon", "ape bon", "ape pvalue bon", "bb"] if columns_multimodal == None else columns_multimodal
self.columns_name_file1 = columns_name_file1
self.columns_name_file2 = columns_name_file2
self.columns_name_file3 = columns_name_file3
self.columns_name_file4 = columns_name_file4
self.len_interpretability_name, self.len_models, = len_interpretability_name, len_models
self.nb_models = nb_models - 1
self.pd_all_models_results1 = pd.DataFrame(columns=columns_name_file1)
self.pd_all_models_results2 = pd.DataFrame(columns=columns_name_file2)
self.pd_all_models_results3 = pd.DataFrame(columns=columns_name_file3)
self.pd_all_models_results4 = pd.DataFrame(columns=columns_name_file4)
self.pd_all_models_multimodal = pd.DataFrame(columns=self.multimodal_columns)
def initialize_per_models(self, filename):
self.filename = filename
os.makedirs(os.path.dirname(self.filename), exist_ok=True)
self.pd_results1 = pd.DataFrame(columns=self.columns_name_file1)
self.pd_results2 = pd.DataFrame(columns=self.columns_name_file2)
self.pd_results3 = pd.DataFrame(columns=self.columns_name_file3)
self.pd_results4 = pd.DataFrame(columns=self.columns_name_file4)
self.pd_multimodal = pd.DataFrame(columns=self.multimodal_columns)
def store_experiments_information_instance(self, results1, filename1, results2=None, filename2=None, results3=None,
filename3=None, results4=None, filename4=None, multimodal=None, multimodal_filename="multimodal.csv"):
"""
Store precisions, coverages, f2s and multimodal results inside dictionary
Args: precisions: list of precision result for each explanation method on a single instance
coverages: list of coverage result for each explanation method on a single instance
f2s: list of f2 score for each explanation method on a single instance
multimodal: 1 if APE selected a multimodal distribution, otherwise 0
"""
self.pd_results1 = self.pd_results1.append( | pd.DataFrame([results1], columns=self.columns_name_file1) | pandas.DataFrame |
import warnings
from datetime import datetime, timedelta
import pandas as pd
import psycopg2
class MarketDataCleaner(object):
"""Get data from main_market table and preprocess it into pandas.Dataframe"""
def __init__(self):
# DB connection and cursor instances.
self.conn = psycopg2.connect()
def clean(self):
# Load all rows from the main_market table.
market_df = self._get_df()
# Convert all the datetimes to UTC time zone.
market_df['date'] = pd.to_datetime(market_df['date'], utc=True)
# Add day and hour columns for better work with date.
market_df['daycol'] = market_df['date'].dt.date
market_df['hourcol'] = market_df['date'].dt.hour
# Remove data points which share the same date&hour.
print('Start removing data points with same date and hour')
ids_to_drop = []
grouped_by_dayhour = market_df.groupby(['daycol', 'hourcol'])
for _, df in grouped_by_dayhour:
if df.shape[0] != 1:
for value in df.index.values[1:]:
ids_to_drop.append(value)
market_df = market_df.drop(ids_to_drop)
# Check if there are Null values.
print('There are {0} NA values main_market'.format(
market_df.isnull().sum().sum()))
# Compare with real hourly data points - fill missing values.
cur_date = datetime.now()
finish_date = datetime(2016, 1, 1)
hour_timedelta = timedelta(hours=1)
while cur_date > finish_date:
filter_day = market_df['daycol'] == cur_date.date()
filter_hour = market_df['hourcol'] == cur_date.hour
if market_df[filter_day & filter_hour].empty:
print(
'Found empty value from market_data at {0}'.format(cur_date))
df_to_add_data = {
'date': [cur_date],
'globalmarketcap': [market_df[filter_day].mean()['globalmarketcap']],
'mchoursentiment': [market_df[filter_day].mean()['mchoursentiment']],
'mchourprediction': [market_df[filter_day].mean()['mchourprediction']],
'mchourtrend': [market_df[filter_day].mean()['mchourtrend']],
'globalvolume': [market_df[filter_day].mean()['globalvolume']],
'daycol': [cur_date.date()],
'hourcol': [cur_date.hour]
}
df_to_add = pd.DataFrame(df_to_add_data)
market_df.append(df_to_add, ignore_index=True)
cur_date -= hour_timedelta
# Return cleaned data.
return market_df
def _get_df(self):
select_query = """select * from main_market;"""
data_df = pd.read_sql_query(select_query, self.conn, index_col='id')
return data_df
class PriceDataCleaner(object):
"""Get data from main_market table and preprocess it into pandas.Dataframe"""
def __init__(self):
# DB connection and cursor instances.
self.conn = psycopg2.connect(
user="sentimentappadmin", password="<PASSWORD>", host="127.0.0.1", database="sentimentappdb")
def clean(self):
# Load all rows from the main_price.
prices_df = self._get_df()
# Convert all the datetimes to UTC time zone.
prices_df['date'] = | pd.to_datetime(prices_df['date'], utc=True) | pandas.to_datetime |
# Objective function to optimize
# sklearn
import math
import os
import time
import matplotlib.pyplot as plt
import numpy as np
# Various
import pandas as pd
import json
import sys
# tensorflow
import tensorflow as tf
# fix for reproducibility
tf.random.set_seed(42)
# sklearn
from sklearn import pipeline
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras import backend as k
# utilities
from data import load_data
from modeling import network
from preprocessing import build_data
def weibull_mean(alpha, beta):
return alpha * math.gamma(1 + 1 / beta)
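# Example: the mean of a Weibull distribution is alpha * gamma(1 + 1/beta), so
# weibull_mean(2, 1) == 2 * gamma(2) == 2.0.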
model = None
def obj_function(net_cfg, cfg=None):
if cfg == None:
cfg = {
"shuffle": True,
"random_state": 21,
"mask_value": -99,
"reps": 30,
"epochs": 100,
"batches": 64,
}
# deleting model if it exists
k.clear_session()
(
train_x_orig,
feature_cols,
vld_trunc,
vld_x_orig,
original_len,
test_x_orig,
test_y_orig,
) = load_data()
# Uncomment for debugging
# print(train_x_orig.shape)
# print(vld_trunc.shape)
# print(original_len.shape)
rmse_train = []
r2_train = []
mae_train = []
std_train = []
rmse_test = []
r2_test = []
mae_test = []
std_test = []
train_all = []
test_all = []
file = "results_no_cv_HO_28_1"
columns = [
"rmse_train",
"mae_train",
"r2_train",
"uncertainty_train",
"rmse_test",
"mae_test",
"r2_test",
"uncertainty_test",
"net_cfg",
]
results = | pd.DataFrame(columns=columns) | pandas.DataFrame |
import sys
import re
import json
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import MultiLabelBinarizer
from scipy.spatial.distance import cdist
from colorama import Fore, Style
from kneed import KneeLocator
import copy
import time
import pickle
import os
def error_msg(error_msg, arg):
"""
Helper function to display error message on the screen.
Input:
The error message along with its respective argument.
(Values include - filename, selected action).
Output:
The formatted error message on the screen along with the argument.
"""
print("****************************")
print(Fore.RED, end='')
print(error_msg,":", arg)
print(Style.RESET_ALL, end='')
print("****************************")
sys.exit(0)
def printINFO(info):
"""
Helper function to display an informational message to the user.
Input:
The message that is to be displayed.
Output:
The formatted message on the screen.
"""
print(Fore.BLUE, end='')
print(info)
print(Style.RESET_ALL, end='')
# *****************************************************************************
# *****************************************************************************
# Helper Methods Start
def calculate_num_clusters(df, acl_weights):
"""
Calculates the optimal number of clusters using the elbow_graph approach.
Input:
The Pandas dataframe of the input file (ACL.json)
Output:
The value of k that provides the least MSE.
"""
files = ['IP_Access_List', 'Route_Filter_List', 'VRF', 'AS_Path_Access_List',
'IKE_Phase1_Keys', 'IPsec_Phase2_Proposals', 'Routing_Policy']
k_select_vals = [41, 17, 42, 5, 3, 2, 58]
curr_file = file_name.split(".")[0]
file_index = files.index(curr_file)
return k_select_vals[file_index]
features = df[df.columns]
ran = min(len(df.columns), len(discrete_namedstructure))
if ran > 50:
k_range = range(1, 587)
else:
k_range = range(1, ran)
print(k_range)
k_range = range(1, 580)
distortions = []
np.seed = 0
clusters_list = []
f = open('distortions.txt', 'w')
for k in k_range:
print(k)
kmeans = KMeans(n_clusters=k).fit(features, None, sample_weight=acl_weights)
clusters_list.append(kmeans)
cluster_centers = kmeans.cluster_centers_
k_distance = cdist(features, cluster_centers, "euclidean")
distance = np.min(k_distance, axis=1)
distortion = np.sum(distance)/features.shape[0]
distortions.append(distortion)
f.write(str(distortion))
f.write("\n")
kn = KneeLocator(list(k_range), distortions, S=3.0, curve='convex', direction='decreasing')
print("Knee is: ", kn.knee)
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('The Elbow Method showing the optimal k')
plt.plot(k_range, distortions, 'bx-')
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
if kn.knee is None:
if ran < 5:
return ran - 1
else:
return 5
return kn.knee
'''
for i in range(1, len(avg_within)):
if (avg_within[i-1] - avg_within[i]) < 1:
break
# return i-1 if len(avg_within) > 1 else 1
# return i - 1 if i > 1 else 1
'''
def perform_kmeans_clustering(df, ns_weights):
"""
To get a mapping of the rows into respective clusters generated using the K-means algorithm.
Input:
df:The Pandas data-frame of the input file (ACL.json)
ns_weights: The weights of each name structure which allows the weighted k-means algorithm to work.
Output:
Adding respective K-means cluster label to the input dataframe.
Example:
Row1 - Label 0 //Belongs to Cluster 0
Row2 - Label 0 //Belongs to Cluster 0
Row3 - Label 1 //Belongs to Cluster 1
"""
global k_select
k_select = calculate_num_clusters(df, ns_weights)
features = df[df.columns]
kmeans = KMeans(n_clusters=k_select)
kmeans.fit(features, None, sample_weight=ns_weights)
labels = kmeans.labels_
df["kmeans_cluster_number"] = pd.Series(labels)
def extract_keys(the_dict, prefix=''):
"""
Recursively gathers the '='-joined paths of all leaf keys in a nested dictionary.
Input:
The dictionary file to find all the keys in.
Output:
All the keys found in the nested dictionary.
Example:
Consider {key1:value1, key2:{key3:value3}, key4:[value4], key5:[{key6:{key7:value7}}]}
The function returns key1, key2=key3, key4, key5=key6=key7
"""
key_list = []
for key, value in the_dict.items():
if len(prefix) == 0:
new_prefix = key
else:
new_prefix = prefix + '=' + key
try:
if type(value) == dict:
key_list.extend(extract_keys(value, new_prefix))
elif type(value) == list and type(value[0]) == dict:
key_list.extend(extract_keys(value[0], new_prefix))
elif type(value) == list and type(value[0]) != dict:
key_list.append(new_prefix)
else:
key_list.append(new_prefix)
except:
key_list.append(new_prefix)
return key_list
def get_uniques(data):
"""
A helper function to get unique elements in a List.
Input:
A list that we need to capture uniques from.
Output:
A dictionary with unique entries and count of occurrences.
"""
acl_count_dict = {}
for acl in data:
acl = json.dumps(acl)
if acl not in acl_count_dict:
acl_count_dict[acl] = 1
else:
value = acl_count_dict[acl]
value += 1
acl_count_dict[acl] = value
keys = []
values = []
for key, value in acl_count_dict.items():
keys.append(key)
values.append(value)
return keys, values
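# Example (illustrative): duplicates are counted on the JSON dump of each entry.
# >>> get_uniques([{"a": 1}, {"a": 1}, {"b": 2}])
# (['{"a": 1}', '{"b": 2}'], [2, 1])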
def overall_dict(data_final):
"""
Parses through the dictionary and appends the frequency with which the keys occur.
Input:
A nested dictionary.
Example:
{key1:{key2:value1, key3:value2, key4:{key5:value3}}
{key6:{key7:value2}
{key8:{key3:value3, key4:value5, key6:value3}}
Output:
Returns a new array with the nested keys appended, together with a tuple containing the un-nested value and
its frequency count.
[{
key1=key2:{'value1':1},
key1=key3:{'value2':2},
key1=key4=key5:{'value3':3},
key6=key7:{'value2':2},
key8=key3:{'value3':3},
key8=key4:{'value5':1},
key8=key6:{'value3':1}
}]
"""
overall_array = []
for data in data_final:
overall = {}
for item in data:
if item[0] is None:
continue
result = extract_keys(item[0])
for element in result:
value = item[0]
for key in element.split("="):
new_value = value[key]
if type(new_value) == list:
if len(new_value) != 0:
new_value = new_value[0]
else:
new_value = "#BUG#"
value = new_value
if element not in overall:
overall[element] = {}
if value not in overall[element]:
overall[element][value] = 1
else:
overall[element][value] += 1
overall_array.append(overall)
return overall_array
def get_overall_dict(data_final):
"""
Parses through the dictionary and appends the frequency with which the keys occur.
Input:
A nested dictionary.
Example:
{key1:{key2:value1, key3:value2, key4:{key5:value3}}
{key6:{key7:value2}
{key8:{key3:value3, key4:value5, key6:value3}}
Output:
Returns a new array with the nested keys appended, together with a tuple containing the un-nested value and its frequency count.
[{
key1=key2:{'value1':1},
key1=key3:{'value2':2},
key1=key4=key5:{'value3':3},
key6=key7:{'value2':2},
key8=key3:{'value3':3},
key8=key4:{'value5':1},
key8=key6:{'value3':1}
}]
"""
overall_array = []
for data in data_final:
overall = {}
new_value = None
flag = 0
for item in data:
visited = {"lines=name":1}
if item[0] is None:
continue
result = extract_keys(item[0])
for element in result:
value = item[0]
for key in element.split("="):
if element not in visited:
visited[element] = 1
new_value = value[key]
flag = 0
if type(new_value) == list:
if len(new_value) > 0:
for list_data in new_value:
if element not in overall:
overall[element] = {}
temp = element
temp_val = list_data
temp = temp.split("=", 1)[-1]
while len(temp.split("=")) > 1:
temp_val = temp_val[temp.split("=")[0]]
temp = temp.split("=", 1)[-1]
list_key = temp
check = 0
try:
if type(temp_val[list_key]) == list:
if temp_val[list_key][0] not in overall[element]:
overall[element][temp_val[list_key][0]] = 1
check = 1
else:
if temp_val[list_key] not in overall[element]:
overall[element][temp_val[list_key]] = 1
check = 1
except:
dummy=0
'''
do nothing
'''
try:
if check == 0:
if type(temp_val[list_key]) == list:
if temp_val[list_key][0] in overall[element]:
overall[element][temp_val[list_key][0]] += 1
else:
if temp_val[list_key] in overall[element]:
overall[element][temp_val[list_key]] += 1
except:
dummy=0
flag = 1
value = new_value
else:
'''
Type is not list
'''
value = new_value
else:
if flag == 0:
if element not in overall:
overall[element] = {}
if new_value not in overall[element]:
overall[element][new_value] = 1
else:
overall[element][new_value] += 1
if flag == 0:
if element not in overall:
overall[element] = {}
if new_value not in overall[element]:
overall[element][new_value] = 1
else:
overall[element][new_value] += 1
overall_array.append(overall)
return overall_array
def calculate_z_score(arr):
"""
Calculates the Z-score (uses mean) (or) Modified Z-score (uses median) of data-points
Input:
Data points generated from parsing through the input file.
Also honours the previously set Z_SCORE_FLAG: 0 (default) uses the Modified Z-score, 1 uses the standard Z-score.
Output:
The Z-score of given data-points array.
"""
if len(arr) == 1:
return arr
z_score = []
'''
Calculates the Z-score using mean. Generally used if distribution is normal (Bell curve).
'''
if Z_SCORE_FLAG:
mean = np.mean(arr)
std = np.std(arr)
if std == 0:
return np.ones(len(arr)) * 1000
for val in arr:
z_score.append((val - mean) / std)
'''
Modified Z-score approach.
Calculates the Z-score using median. Generally used if distribution is skewed.
'''
else:
median_y = np.median(arr)
medians = [np.abs(y - median_y) for y in arr]
med = np.median(medians)
median_absolute_deviation_y = np.median([np.abs(y - median_y) for y in arr])
if median_absolute_deviation_y == 0:
return np.ones(len(arr)) * 1000
z_score = [0.6745 * (y - median_y) / median_absolute_deviation_y for y in arr]
return z_score
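# Worked example (modified Z-score path, i.e. Z_SCORE_FLAG falsy): for
# arr = [2, 4, 6] the median is 4 and the median absolute deviation is 2, so
# the scores are 0.6745 * (y - 4) / 2, i.e. [-0.6745, 0.0, 0.6745]. When the
# median absolute deviation is 0, an array of 1000s is returned instead.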
def calculate_signature_d(overall_arr):
"""
Uses Z-score to generate the signatures of data-points and also maps points on level of significance (include for
signature calculation, include for bug calculation, no significance).
If Z-score is equal to 1000.0 or in between sig_threshold and bug_threshold, no-significance.
If Z-score is >= sig_threshold, include for signature calculation.
If Z-score is <= bug_threshold, include for bug calculation.
Input:
The individual master-signature generated for each Cluster.
Output:
An array containing dictionaries marked with tags that represent the action that needs to be performed on them.
"""
signature = {}
for key, value in overall_arr.items():
sig_threshold = 0.5
bug_threshold = -0.1
key_points = []
data_points = []
sig_values = []
for k, v in value.items():
key_points.append(k)
data_points.append(v)
if len(data_points) == 1:
sig_values.append((key_points[0], (data_points[0])))
'''
Check for two data points case
'''
else:
z_score = calculate_z_score(data_points)
if len(z_score) > 0:
avg_z_score = sum(z_score)/len(z_score)
bug_threshold = bug_threshold + (avg_z_score - sig_threshold)
for i in range(len(z_score)):
present_zscore = z_score[i]
if present_zscore == 1000.0:
sig_values.append((key_points[i], "*", (data_points[i])))
elif present_zscore >= sig_threshold:
sig_values.append((key_points[i], (data_points[i])))
elif present_zscore <= bug_threshold:
sig_values.append((key_points[i], "!", (data_points[i])))
elif (present_zscore < sig_threshold) and (present_zscore > bug_threshold):
sig_values.append((key_points[i], "*", (data_points[i])))
if key in signature:
signature[key].append(sig_values)
else:
signature[key] = []
signature[key] += sig_values
return signature
def results(data, signatures):
title = file_name.split(".")[0] + "_Results.txt"
if not os.path.exists(os.path.dirname(title)):
os.makedirs(os.path.dirname(title))
f = open(title, "w")
f.write(title + "\n")
f.write("\n")
totalBugs = 0
totalConformers = 0
for cluster_index, clustered_namedStructure in enumerate(data):
numBugs = 0
numConformers = 0
cluster_signature = signatures[cluster_index]
for namedStructure in clustered_namedStructure:
keys = extract_keys(namedStructure[0])
namedStructure = flatten_json((namedStructure[0]), '=')
isNamedStructureABug = False
newNamedStructure = {}
for key, value in namedStructure.items():
flag = 0
for index, char in enumerate(key):
if char == '0' or char == '1' or char == '2' or char == '3' or char == '4' or char == '5' or char == '6' or char == '7' or char == '8' or char == '9':
flag = 1
if index == len(key)-1:
new_key = str(key[0:index-1])
newNamedStructure[new_key] = value
else:
new_key = str(key[0:index-1]) + str(key[index+1:len(key)])
newNamedStructure[new_key] = value
if not flag:
newNamedStructure[key] = value
flag = 0
for propertyKey, propertyValue in newNamedStructure.items():
try:
propValues = cluster_signature[propertyKey]
except:
print("EXCEPTION OCCURRED!")
print(propertyKey)
for value in propValues:
if value[0] == propertyValue and value[1] == '!':
numBugs += 1
isNamedStructureABug = True
if isNamedStructureABug:
numBugs += 1
else:
numConformers += 1
numBugs = len(clustered_namedStructure) - numConformers
f.write("Cluster Index: " + str(cluster_index) + "\n")
f.write(" Number of elements in Cluster = " + str(len(clustered_namedStructure)) + "\n")
f.write(" Number of Bugs using Z-score: " + str(len(clustered_namedStructure) - numConformers) + "\n")
f.write(" Number of Conformers using Z-score: " + str(numConformers) + "\n")
f.write("\n")
totalBugs += numBugs
totalConformers += numConformers
print("Total Bugs = ", totalBugs)
print("Total Confomers = ", totalConformers)
f.write("\n")
f.write("\n")
f.write("Total Bugs using Z-score: " + str(totalBugs) + "\n")
f.write("Total Conformers using Z-score: " + str(totalConformers))
def transform_data(data):
"""
A helper function to extract nested keys from the ACL and to add the frequency of the repeated value. Helps score data.
Input:
An ACL in the form {key1:value1, key2:{key3:value3}, key4:[value4], key5:[key6:{key7:value7}]}.
Output:
Extracted nested keys from the extract_keys function along with the frequency count.
Example:
[
{key1:{key2:value1, key3:value2, key4:{key5:value3}}
{key6:{key7:value2}
{key8:{key3:value3, key4:value5, key6:value3}}
]
Returns a new array with the nested keys appended, together with a tuple containing the un-nested value and its frequency count.
[{
key1=key2:{'value1':1},
key1=key3:{'value2':2},
key1=key4=key5:{'value3':3},
key6=key7:{'value2':2},
key8=key3:{'value3':3},
key8=key4:{'value5':1},
key8=key6:{'value3':3}
}]
"""
count = 1
overall = {}
flag = 0
i = 0
while i < count:
value = None
result = None
new_value = None
for item in data:
result = extract_keys(item)
for element in result:
value = item
for key in element.split("="):
if key in value:
new_value = value[key]
if (type(new_value) == list) and (len(new_value) > 1):
if flag == 0:
count = len(new_value)
flag = 1
try:
new_value = new_value[i]
except:
new_value = new_value[-1]
elif (type(new_value) == list) and (len(new_value) == 1):
new_value = new_value[0]
value = new_value
if element not in overall:
overall[element] = {}
if type(value) != dict and type(value) != list:
if value not in overall[element]:
overall[element][value] = 1
i += 1
return overall
def calculate_signature_score(signature):
"""
Calculates the signature score for each signature as the sum of all the weights in it but ignoring the weights marked with "*".
Input:
A signature that contains tags of whether or not the weight should be included in calculating the signature.
Output:
An array containing the weights of all the signatures that should be considered.
Example:
Consider [
{'key1=key2':['val1', 40], 'key3=key4':['val2':90]}, //40 + 90
{'key5=key6=key7':['val3', *, 20], 'key8=key9':['val4':80]}, //80
{'key10=key11':['val5', 40]} //40
Returns [130, 80, 40].
"""
score_arr = []
for sig in signature:
score = 0
for key, value in sig.items():
for val in value:
if (val[1] != "!") and (val[1] != "*"):
score += val[1]
elif val[1] == "!":
score += val[2]
score_arr.append(score)
return score_arr
def calculate_namedstructure_scores(data_final, all_signatures):
"""
Calculate the individual scores for each discrete-ACL. This includes calculating human_error scores,
signature_scores, and deviant scores.
Input:
data_final:
List of ACLs grouped into a Cluster.
Example:
[
[acl-1, acl-4, acl-5, acl-9], //Cluster-0
[acl-2, acl-3], //Cluster-1
[acl-7], //Cluster-2
[acl-6, acl-8] //Cluster-3
]
all_signatures:
Consolidated signature for each Cluster.
Output:
deviant_arr: Returns all deviant properties for the ACL. Empty list is returned if no deviant property
in the ACL.
count_arr: [[TODO]]
dev_score: Returns the deviant score for the deviant properties found. 0 if no deviant property.
acls_arr: [[TODO]]
sig_score: Returns the signature score of the ACL.
cluster_num: Returns the cluster number that the ACL belongs to.
acls_score: The score that is generated for each acl
human_errors_arr: Returns the human_error properties (IPValidity, DigitRepetition, PortRange) for each ACL and
empty list if no human_error properties present in the ACL.
human_error_score: Returns the score of the human error property calculated for the ACL. 0 is returned if
no human_error property exists in the ACL.
"""
deviant_arr = []
count_arr = []
acls_dict = {}
acls_arr = []
acls_score = []
sig_score = []
dev_score = []
cluster_num = []
human_errors_arr = []
human_errors_score = []
i = 0
for acl_list in data_final:
bug_count = 0
conformer_count = 0
signature = all_signatures[i]
for acl in acl_list:
flag = 0
if str(acl[0]) not in acls_dict:
acls_dict[str(acl[0])] = 1
acls_arr.append(acl[0])
cluster_num.append(i)
flag = 1
else:
print(acl[0])
print(acls_dict)
continue
sig_score.append(signature_scores[i])
deviant = []
count = 0
dev_c = 0
acl_c = 0
human_errors = []
human_error_category = {}
data = transform_data(acl)
for data_key, data_val in data.items():
if data_key in signature:
'''
Key Valid. Now check for actual Value
'''
for val in data_val.items():
(error_key, error_value), error_category = calculateHumanErrors(data_key, val[0], signature[data_key], file_name.split(".")[0])
if error_category:
human_errors.append((error_key, error_value))
if error_category not in human_error_category:
human_error_category[error_category] = 0
human_error_category[error_category] += 1
for sig_val in signature[data_key]:
if val[0] == sig_val[0]:
'''
value also present. Now check if value part of bug/sig/skip
'''
if sig_val[1] == "!":
dev_c += sig_val[2]
acl_c += sig_val[2]
deviant.append((data_key, sig_val[0]))
bug_count += 1
elif sig_val[1] == "*":
conformer_count += 1
continue
else:
conformer_count += 1
count += sig_val[1]
acl_c += sig_val[1]
else:
'''
Deviant Key
'''
if data_key != "lines=name":
deviant.append(data_key)
dev_c += data_val
acl_c += data_val
if flag == 1:
count_arr.append(count)
deviant_arr.append(deviant)
dev_score.append(dev_c)
acls_score.append(acl_c)
human_errors_arr.append(human_errors)
human_errors_score.append(calculate_human_error_score(human_error_category))
i += 1
return deviant_arr, count_arr, dev_score, acls_arr, sig_score, cluster_num, acls_score, human_errors_arr, human_errors_score
def checkIPValidity(ip_address):
"""
A reg-ex check to verify the validity of an IP address.
Input:
A string containing one or more IP addresses separated by ':'.
Output:
A boolean representing the validity of the IP addresses.
Returns 'True' if all the IPs are valid and 'False' if any of them is invalid.
"""
try:
ip_address = ip_address.split(":")
for ip in ip_address:
IP_check = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])?(\/)?((3[01]|3[02]|[12][0-9]|[0-9])?)$"
match = re.match(IP_check, ip)
if not match:
return False
return True
except Exception as e:
print(e)
return True
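# Example (illustrative):
# >>> checkIPValidity("10.0.0.1:192.168.0.0/24")
# True
# >>> checkIPValidity("1333.0.0.13")
# False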
def checkPortRange(port_range):
"""
A check to verify that the port range is specified correctly (elem0 <= elem1).
Input:
A string that contains two numbers separated by a '-'.
Output:
A boolean representing the validity of the range (elem0 <= elem1).
Example:
52108-52109 (True)
466 - 466 (True)
466 - 465 (False)
"""
try:
port_split = port_range.split("-")
if port_split[-1] < port_split[0]:
return False
return True
except:
return True
def checkDigitRepetition(digit, signature):
"""
Checks for Digit repetition.
Input:
The value for the following keys: srcPorts, dstPorts, lengthRange
Output:
Returns True if the value matches a signature value with its last digit duplicated (a likely copying error).
"""
try:
if type(digit) == str:
digit = float(digit.split(":")[0])
if digit == 0:
return False
for item in signature:
if type(item) == str:
item = int(item.split(":")[0])
if digit == (item*10+item%10):
print("--------", digit, item*10 + item%10)
return True
return False
except:
return False
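# Illustrative usage sketch (not part of the original pipeline); the sample values
# are made up. Note that a positive match also triggers the debug print above.
def _example_check_digit_repetition():
    """55 looks like a doubled last digit of the signature value 5, so it is flagged."""
    flagged = checkDigitRepetition(55, [5])   # True
    clean = checkDigitRepetition(7, [5])      # False
    return flagged, clean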
def calculateHumanErrors(data_key, data, signature, namedStructure):
"""
Checks for simple human errors like entering invalid IP Addresses, incorrect port-ranges, and digit repetitions.
Input:
data_key: The nested keys calculated in the overall_dict and get_overall_dict methods.
Example: key1=key2=key4
data: The data value for the keys.
signature: The signature for the keys that was calculated in the calculate_signature_d method.
namedStructure: The type of the IP file.
Possible values: IP_Access_List, Route_Filter_List, Routing_Policy, VRF, others.
Output:
Returns the error and the category it belongs to.
Example:
key1=key2=key3 [1333.0.0.13] [13172.16.31.10] IP_Access_List
Returns:
key1=key2=key3 [1333.0.0.13] IP
"""
human_error = (None, None)
category = None
data_key = data_key.split("=")[-1]
signature_items = []
for sig_item in signature:
signature_items.append(sig_item[0])
if namedStructure == "IP_Access_List":
if data_key == "ipWildcard":
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif data_key in ["dstPorts", "srcPorts"]:
if not checkPortRange(data):
'''
Invalid Ports Range
'''
human_error = (data_key, data)
category = "RANGE"
elif namedStructure == "Route_Filter_List":
if data_key == "ipWildcard":
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif data_key == "lengthRange":
if not checkPortRange(data):
'''
Invalid Ports Range
'''
human_error = (data_key, data)
category = "RANGE"
elif namedStructure == "Routing_Policy":
if data_key == "communities":
if checkDigitRepetition(data, signature_items):
'''
Error Copying digits
'''
human_error = (data_key, data)
category = "DIGIT"
elif data_key == "ips":
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif namedStructure == "VRF":
if data_key in ["administrativeCost", "remoteAs", "metric", "localAs", "referenceBandwidth", ]:
if checkDigitRepetition(data, signature_items):
'''
Error Copying digits
'''
human_error = (data_key, data)
category = "DIGIT"
elif data_key in ["peerAddress", "localIp", "routerId", "network"]:
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
    # Any other namedStructure
    else:
try:
if re.search('IP|ip', data_key) and not re.search('[a-zA-Z]', data):
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif not re.search("[a-zA-Z]", data):
if checkDigitRepetition(data, signature_items):
'''
Error Copying digits
'''
human_error = (data_key, data)
category = "DIGIT"
except:
pass
return human_error, category
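# Illustrative usage sketch (not part of the original pipeline). The key name,
# IP value and signature tuple below are hypothetical, not taken from a real snapshot.
def _example_calculate_human_errors():
    """An out-of-range octet in an IP_Access_List value is reported under the 'IP' category."""
    error, category = calculateHumanErrors(
        "lines=ipWildcard", "1333.0.0.13", [("10.0.0.0/8", 1)], "IP_Access_List")
    # error == ("ipWildcard", "1333.0.0.13"), category == "IP"
    return error, category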
def calculate_human_error_score(category_dict):
"""
    Scores the human errors that have been found. IP-validity and digit-repetition
    errors are weighted 'high' (0.8) and port-range errors are weighted 'medium' (0.5).
Input:
A dictionary containing the count of the error occurrences.
Output:
A weighted sum of all the errors found.
"""
total_score = 0
low = 0.2
medium = 0.5
high = 0.8
weightage_dict = {"IP": high, "RANGE": medium, "DIGIT": high}
for category, count in category_dict.items():
if count != 0:
#print("* Human Error Found *")
total_score += weightage_dict[category]/np.log(1+count)
return round(total_score/len(category_dict), 2) if category_dict else total_score
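# Illustrative worked example (not part of the original pipeline) of the weighting above:
# one IP error and two RANGE errors give 0.8/ln(2) + 0.5/ln(3), averaged over two categories.
def _example_calculate_human_error_score():
    score = calculate_human_error_score({"IP": 1, "RANGE": 2})
    # 0.8/ln(2) ~= 1.154 and 0.5/ln(3) ~= 0.455; (1.154 + 0.455) / 2 ~= 0.80, so score == 0.8
    return score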
def flatten_json(data, delimiter):
"""
Flattens a JSON file.
Input:
data:
A JSON dictionary of hierarchical format.
{key1: {key2: value2, key3: value3}, key4: {key5: value5, key6: [value6, value7, value8]}}
delimiter:
A parameter to separate the keys in order to facilitate easy splitting.
Output:
A flattened dictionary with keys separated by the delimiter parameter.
        key1_key2: value2, key1_key3: value3, key4_key5: value5, key4_key6_0: value6, key4_key6_1: value7, key4_key6_2: value8
        (elements of a list get their positional index appended to the key)
"""
out = {}
def flatten(data, name=''):
if type(data) is dict:
for key in data:
flatten(data[key], name + key + delimiter)
elif type(data) is list:
i = 0
for elem in data:
flatten(elem, name + str(i) + delimiter)
i += 1
else:
out[name[:-1]] = data
flatten(data)
return out
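# Illustrative usage sketch (not part of the original pipeline) showing a tiny
# input/output pair for flatten_json with the '_' delimiter.
def _example_flatten_json():
    nested = {"key1": {"key2": "value2"}, "key4": {"key6": ["value6", "value7"]}}
    flat = flatten_json(nested, "_")
    # flat == {"key1_key2": "value2", "key4_key6_0": "value6", "key4_key6_1": "value7"}
    return flat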
def encode_data(data):
"""
Converts categorical values into numeric values. We use MultiLabelBinarizer to encode categorical data.
This is done in order to pass the data into clustering and other similar algorithms that can only handle numerical data.
Flattens each ACL list and then encodes them.
Input:
A Python list that contains all discrete-ACLs.
Output:
A Python list after encoding.
"""
flattenedData = []
allKeys = []
for NS in data:
flattenedNamedStructure = flatten_json(NS, '_')
flattenedData.append(flattenedNamedStructure)
for key in flattenedNamedStructure.keys():
if key not in allKeys:
allKeys.append(key)
mergedData = []
for NS in flattenedData:
mergedNS = []
for key, value in NS.items():
mergedNS.append(str(value))
mergedData.append(mergedNS)
mlb = MultiLabelBinarizer()
data_T = mlb.fit_transform(mergedData)
print("MLb classes=")
print(mlb.classes_)
return data_T, mlb.classes_
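# Illustrative sketch (not part of the original pipeline) of the encoding step that
# encode_data performs internally: MultiLabelBinarizer turns each ACL's set of
# stringified values into a fixed-length 0/1 vector. The token lists are made up.
def _example_multilabel_encoding():
    from sklearn.preprocessing import MultiLabelBinarizer
    merged = [["permit", "tcp", "51107"], ["permit", "udp", "1200"]]
    mlb = MultiLabelBinarizer()
    encoded = mlb.fit_transform(merged)
    # mlb.classes_ -> ['1200', '51107', 'permit', 'tcp', 'udp']
    # encoded      -> [[0, 1, 1, 1, 0],
    #                  [1, 0, 1, 0, 1]]
    return encoded, mlb.classes_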
def export_clusters(data, acl_weight_mapper):
"""
Helper Method to verify authenticity of Clusters being formed.
Input:
The data that is sorted into list of Clusters.
Example:
[
[acl-1, acl-4, acl-5, acl-9], //Cluster-0
[acl-2, acl-3], //Cluster-1
[acl-7], //Cluster-2
[acl-6, acl-8] //Cluster-3
]
    We also use the namedstructure_node_mapper and acl_weight_mapper dictionaries to look up,
    for each ACL, the nodes it appears on and its weight.
Output:
A csv file by the name of Generated_Clusters is written in the format:
Cluster-0 |||| Cluster-0 Names |||| Cluster-0 Nodes |||| Cluster-1 |||| Cluster-1 Names |||| Cluster-1 Nodes
acl-1 |||| permit tcp eq 51107 |||| st55in15hras |||| acl-2 |||| permit udp any eq 1200 |||| rt73ve11m5ar
acl-4 |||| permit tcp eq 51102 |||| st55in15hras, st55in17hras |||| acl-3 |||| permit udp any eq 120002 |||| rt73ve10m4ar
acl-5 |||| permit tcp eq 51100 |||| st55in17hras ||||
acl-9 |||| permit tcp eq 51109 |||| st55in17hras ||||
"""
column_labels = []
for index in range(len(data)):
column_labels.append("Cluster " + str(index))
column_labels.append("Cluster " + str(index) + " ACL Weights")
column_labels.append("Cluster " + str(index) + " Nodes")
data_to_export = pd.DataFrame(columns=column_labels)
for cluster_index, cluster_data in enumerate(data):
discrete_ACL_nodes = []
cluster_weights = []
for discrete_ACL in cluster_data:
temp = json.dumps(discrete_ACL[0], sort_keys=True)
temp_arr = []
try:
for node in namedstructure_node_mapper[temp]:
temp_arr.append(node)
discrete_ACL_nodes.append(temp_arr)
except:
discrete_ACL_nodes.append(None)
cluster_weights.append(acl_weight_mapper[temp])
cluster_data = pd.Series(cluster_data)
cluster_weights_series = pd.Series(cluster_weights)
discrete_ACL_nodes = | pd.Series(discrete_ACL_nodes) | pandas.Series |
#pylint: disable=too-many-lines
"""Wells and WellSegment components."""
from copy import deepcopy
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import h5py
from anytree import (RenderTree, AsciiStyle, Resolver, PreOrderIter, PostOrderIter,
find_by_attr)
from .well_segment import WellSegment
from .rates import calculate_cf, show_rates, show_rates2, show_blocks_dynamics
from .grids import OrthogonalUniformGrid
from .base_component import BaseComponent
from .utils import full_ind_to_active_ind, active_ind_to_full_ind
from .getting_wellblocks import defining_wellblocks_vtk, find_first_entering_point, defining_wellblocks_compdat
from .wells_dump_utils import write_perf, write_events, write_schedule, write_welspecs
from .wells_load_utils import (load_rsm, load_ecl_binary, load_group, load_grouptree,
load_welspecs, load_compdat, load_wconprod, load_wconinje,
load_welltracks, load_events, load_history,
DEFAULTS, VALUE_CONTROL)
from .decorators import apply_to_each_segment, state_check
class IterableWells:
"""Wells iterator."""
def __init__(self, root):
self.iter = PreOrderIter(root)
def __next__(self):
x = next(self.iter)
if x.is_group:
return next(self)
return x
class Wells(BaseComponent):
"""Wells component.
Contains wells and groups in a single tree structure, wells attributes
and preprocessing actions.
Parameters
----------
node : WellSegment, optional
Root node for well's tree.
"""
def __init__(self, node=None, **kwargs):
super().__init__(**kwargs)
self._root = WellSegment(name='FIELD', is_group=True) if node is None else node
self._resolver = Resolver()
self.init_state(has_blocks=False,
has_cf=False,
spatial=True,
all_tracks_complete=False,
all_tracks_inside=False,
full_perforation=False)
def copy(self):
"""Returns a deepcopy. Cached properties are not copied."""
copy = self.__class__(self.root.copy())
copy._state = deepcopy(self.state) #pylint: disable=protected-access
for node in PreOrderIter(self.root):
if node.is_root:
continue
node_copy = node.copy()
node_copy.parent = copy[node.parent.name]
return copy
@property
def root(self):
"""Tree root."""
return self._root
@property
def resolver(self):
"""Tree resolver."""
return self._resolver
@property
def main_branches(self):
"""List of main branches names."""
return [node.name for node in self if node.is_main_branch]
@property
def names(self):
"""List of well names."""
return [node.name for node in self]
@property
def event_dates(self):
"""List of dates with any event in main branches."""
return self._collect_dates('EVENTS')
@property
def result_dates(self):
"""List of dates with any result in main branches."""
return self._collect_dates('RESULTS')
@property
def history_dates(self):
"""List of dates with any history in main branches."""
return self._collect_dates('HISTORY')
def _collect_dates(self, attr):
"""List of common dates given in the attribute of main branches."""
agg = [getattr(node, attr).DATE for node in self if node and attr in node]
if not agg:
return pd.to_datetime([])
dates = sorted(pd.concat(agg).unique())
return | pd.to_datetime(dates) | pandas.to_datetime |
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = | Period('1Q05') | pandas.tseries.period.Period |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
#
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
books = pd.read_csv('data/books.csv', encoding = "ISO-8859-1")
books.head()
ratings = pd.read_csv('data/ratings.csv', encoding = "ISO-8859-1")
ratings.head()
book_tags = pd.read_csv('data/book_tags.csv', encoding = "ISO-8859-1")
book_tags.head()
tags = pd.read_csv('data/tags.csv')
tags.tail()
tags_join_DF = | pd.merge(book_tags, tags, left_on='tag_id', right_on='tag_id', how='inner') | pandas.merge |
import matplotlib.pyplot as plt
import datetime as datetime
import numpy as np
import pandas as pd
import talib
import seaborn as sns
from time import time
from sklearn import preprocessing
from pandas.plotting import register_matplotlib_converters
from .factorize import FactorManagement
import scipy.stats as stats
import cvxpy as cvx
import zipfile
import os
from sklearn import linear_model, decomposition, ensemble, preprocessing, isotonic, metrics
from sklearn.impute import SimpleImputer
import xgboost
register_matplotlib_converters()
class Learner:
def __init__(self):
pass
@staticmethod
def shift_mask_data(X, Y, upper_percentile, lower_percentile, n_fwd_days):
# Shift X to match factors at t to returns at t+n_fwd_days (we want to predict future returns after all)
shifted_X = np.roll(X, n_fwd_days + 1, axis=0)
# Slice off rolled elements
X = shifted_X[n_fwd_days + 1:]
Y = Y[n_fwd_days + 1:]
n_time, n_stocks, n_factors = X.shape
# Look for biggest up and down movers
upper = np.nanpercentile(Y, upper_percentile, axis=1)[:, np.newaxis]
lower = np.nanpercentile(Y, lower_percentile, axis=1)[:, np.newaxis]
upper_mask = (Y >= upper)
lower_mask = (Y <= lower)
mask = upper_mask | lower_mask # This also drops nans
mask = mask.flatten()
# Only try to predict whether a stock moved up/down relative to other stocks
Y_binary = np.zeros(n_time * n_stocks)
Y_binary[upper_mask.flatten()] = 1
Y_binary[lower_mask.flatten()] = -1
# Flatten X
X = X.reshape((n_time * n_stocks, n_factors))
# Drop stocks that did not move much (i.e. are in the 30th to 70th percentile)
X = X[mask]
Y_binary = Y_binary[mask]
return X, Y_binary
def feature_importance_adaboost(self, n_fwd_days, close, all_factors, n_estimators, train_size,
upper_percentile, lower_percentile):
pipe = all_factors
pipe.index = pipe.index.set_levels([pd.to_datetime(pipe.index.levels[0]), pipe.index.levels[1]])
close = close[pipe.index.levels[1]]
close.index = pd.to_datetime(close.index)
chunk_start = pipe.index.levels[0][0]
chunk_end = pipe.index.levels[0][-1]
returns = FactorManagement().log_Returns(close, 1).loc[slice(chunk_start, chunk_end), :]
returns_stacked = returns.stack().to_frame('Returns')
results = pd.concat([pipe, returns_stacked], axis=1)
results.index.set_names(['date', 'asset'], inplace=True)
results_wo_returns = results.copy()
returns = results_wo_returns.pop('Returns')
Y = returns.unstack().values
X = results_wo_returns.to_xarray().to_array()
X = np.array(X)
X = X.swapaxes(2, 0).swapaxes(0, 1) # (factors, time, stocks) -> (time, stocks, factors)
# Train-test split
train_size_perc = train_size
n_time, n_stocks, n_factors = X.shape
train_size = np.int16(np.round(train_size_perc * n_time))
X_train, Y_train = X[:train_size], Y[:train_size]
X_test, Y_test = X[(train_size + n_fwd_days):], Y[(train_size + n_fwd_days):]
X_train_shift, Y_train_shift = self.shift_mask_data(X_train, Y_train, n_fwd_days=n_fwd_days,
lower_percentile=lower_percentile,
upper_percentile=upper_percentile)
X_test_shift, Y_test_shift = self.shift_mask_data(X_test, Y_test, n_fwd_days=n_fwd_days,
lower_percentile=lower_percentile,
upper_percentile=upper_percentile)
start_timer = time()
# Train classifier
imputer = SimpleImputer()
scaler = preprocessing.MinMaxScaler()
clf = ensemble.AdaBoostClassifier(
                n_estimators=n_estimators)  # n_estimators controls how many weak classifiers are fit
X_train_trans = imputer.fit_transform(X_train_shift)
X_train_trans = scaler.fit_transform(X_train_trans)
clf.fit(X_train_trans, Y_train_shift)
end_timer = time()
        print('Time to train full ML pipeline: {} secs'.format(end_timer - start_timer))
Y_pred = clf.predict(X_train_trans)
print('Accuracy on train set = {:.2f}%'.format(metrics.accuracy_score(Y_train_shift, Y_pred) * 100))
# Transform test data
X_test_trans = imputer.transform(X_test_shift)
X_test_trans = scaler.transform(X_test_trans)
# Predict!
Y_pred = clf.predict(X_test_trans)
Y_pred_prob = clf.predict_proba(X_test_trans)
print('Predictions:', Y_pred)
print('Probabilities of class == 1:', Y_pred_prob[:, 1] * 100)
print('Accuracy on test set = {:.2f}%'.format(metrics.accuracy_score(Y_test_shift, Y_pred) * 100))
print('Log-loss = {:.5f}'.format(metrics.log_loss(Y_test_shift, Y_pred_prob)))
feature_importances = pd.Series(clf.feature_importances_, index=results_wo_returns.columns)
        feature_importances = feature_importances.sort_values(ascending=False)
ax = feature_importances.plot(kind='bar')
ax.set(ylabel='Importance (Gini Coefficient)', title='Feature importances')
feature_importances = pd.DataFrame(data=feature_importances.values,
columns=['weights'],
index=feature_importances.index)
feature_importances.index.name = 'factors'
return feature_importances
def feature_importance_xgb(self, n_fwd_days, close, all_factors, n_estimators, train_size,
upper_percentile, lower_percentile):
pipe = all_factors
pipe.index = pipe.index.set_levels([pd.to_datetime(pipe.index.levels[0]), pipe.index.levels[1]])
close = close[pipe.index.levels[1]]
close.index = | pd.to_datetime(close.index) | pandas.to_datetime |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
        # add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = | Int64Index([], name='Foo') | pandas.core.index.Int64Index |
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
import random
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
#Helpful functions for the project
#Input: Master DF
#Output: Master DF with winner_label column filled in to show who won team one or team twol
def set_winners(master_df):
id_list = master_df.id.unique()
for i in id_list:
t1 = ((master_df[master_df['id'] == i]).team_one).iloc[0]
t2 = ((master_df[master_df['id'] == i]).team_two).iloc[0]
winner = ((master_df[master_df['id'] == i]).winner).iloc[0]
mask = master_df['id'] == i
if t1 == winner:
master_df['winner_label'][mask] = 0
elif t2 == winner:
master_df['winner_label'][mask] = 1
return master_df
#Very specific helper function
#returns a list of the following information:
#Returned {team_one_wins, team_one_losses, team_one_total_matches, team_two_wins, team_two_losses, team_two_total_matches}
#Takes the master_df and a match ID as the input
def get_win_loss_total(wl_df, wl_id):
t1 = ((wl_df[wl_df['id'] == wl_id]).team_one).iloc[0]
t2 = ((wl_df[wl_df['id'] == wl_id]).team_two).iloc[0]
date = ((wl_df[wl_df['id'] == wl_id]).date).iloc[0]
#print(f"Team 1: {t1} Team 2: {t2} Date: {date}")
teams=[t1, t2]
return_info=[]
for team in teams:
df_partial_season=wl_df[wl_df['date']<date]
season_1 = df_partial_season[df_partial_season['team_one'] == team]
season_2 = df_partial_season[df_partial_season['team_two'] == team]
team_season = season_1.append(season_2)
wins = len(team_season[team_season['winner'] == team])
losses = len(team_season[team_season['winner'] != team])
matches = wins+losses
#print(f"{team} {len(season_1)} {len(season_2)} {len(team_season)} {wins} ")
return_info.append(wins)
return_info.append(losses)
return_info.append(matches)
return(return_info)
#This data isn't in the Blizzard information dump so it has to be hardcoded
#Input: master_df
#output: master_df with last season's results
def find_last_season_results(master_df):
results_2018 = {'Philadelphia Fusion':6,
'New York Excelsior':1,
'Seoul Dynasty':8,
'Shanghai Dragons':12,
'Toronto Defiant': '',
'Atlanta Reign': '',
'Dallas Fuel': 10,
'Chengdu Hunters': '',
'London Spitfire': 5,
'Washington Justice': '',
'Los Angeles Valiant': 2,
'Vancouver Titans': '',
'Houston Outlaws': 7,
'San Francisco Shock': 9,
'Guangzhou Charge': '',
'Los Angeles Gladiators': 4,
'Hangzhou Spark': '',
'Florida Mayhem': 11,
'Paris Eternal': '',
'Boston Uprising': 3
}
results_2019 = {
'Toronto Defiant':17, 'London Spitfire':7, 'Los Angeles Gladiators':5,
'Los Angeles Valiant':13, 'Boston Uprising':19, 'San Francisco Shock':3,
'Florida Mayhem':20, 'Washington Justice':17, 'New York Excelsior':2,
'Paris Eternal':14, 'Guangzhou Charge':9, 'Chengdu Hunters':12, 'Hangzhou Spark':4,
'Seoul Dynasty':8, 'Shanghai Dragons':11, 'Houston Outlaws':16,
'Philadelphia Fusion':10, 'Dallas Fuel':15, 'Vancouver Titans':1, 'Atlanta Reign':6
}
years = ['2019', '2020']
for year in years:
if year == '2019':
min_date = '01/01/2019'
max_date = '12/31/2019'
elif year == '2020':
min_date = '01/01/2020'
max_date = '12/31/2020'
df_reduced = master_df[master_df['date'] > min_date]
df_reduced = df_reduced[df_reduced['date'] < max_date]
#print(df_reduced.team_one.unique())
id_list=df_reduced.id.unique()
for i in id_list:
t1 = ((df_reduced[df_reduced['id'] == i]).team_one).iloc[0]
t2 = ((df_reduced[df_reduced['id'] == i]).team_two).iloc[0]
mask = master_df['id'] == i
if year=='2019':
master_df['t1_place_last_season'][mask] = results_2018.get(t1)
master_df['t2_place_last_season'][mask] = results_2018.get(t2)
if year=='2020':
master_df['t1_place_last_season'][mask] = results_2019.get(t1)
master_df['t2_place_last_season'][mask] = results_2019.get(t2)
return master_df
#Input: Master_df
#Output: master_df with head-to-head information filled in
def find_head_to_head_results(master_df):
id_list = master_df.id.unique()
for i in id_list:
t1 = ((master_df[master_df['id'] == i]).team_one).iloc[0]
t2 = ((master_df[master_df['id'] == i]).team_two).iloc[0]
date = ((master_df[master_df['id'] == i]).date).iloc[0]
#print(f"Team 1: {t1} Team 2: {t2} Date: {date}")
df_partial_season=master_df[master_df['date']<date]
season_1 = df_partial_season[df_partial_season['team_one'] == t1]
season_1 = season_1[season_1['team_two'] == t2]
season_2 = df_partial_season[df_partial_season['team_two'] == t1]
season_2 = season_2[season_2['team_one'] == t2]
team_season = season_1.append(season_2)
wins = len(team_season[team_season['winner'] == t1])
losses = len(team_season[team_season['winner'] != t1])
matches = wins+losses
mask = master_df['id'] == i
master_df['t1_wins_vs_t2'][mask] = wins
master_df['t1_losses_vs_t2'][mask] = losses
master_df['t1_matches_vs_t2'][mask] = matches
#print(f"{t1} vs {t2} on {date} record: {wins} - {losses} ")
if matches > 0:
master_df['t1_win_percent_vs_t2'][mask] = wins/matches
return master_df
#input: #master_df, min_date, max_date, type_option(current_season OR all_time)
def find_team_match_results(master_df, min_date, max_date, type_option):
df_reduced = master_df[master_df['date'] > min_date]
df_reduced = df_reduced[df_reduced['date'] < max_date]
#We can use this to make an ID list...
id_list = df_reduced.id.unique()
for i in id_list:
win_loss_list = (get_win_loss_total(df_reduced, i))
mask = master_df['id'] == i
#calculate the win percentages for each match
if win_loss_list[2] == 0:
t1_win_percent = 0
else:
t1_win_percent = win_loss_list[0] / win_loss_list[2]
if win_loss_list[5] == 0:
t2_win_percent = 0
else:
t2_win_percent = win_loss_list[3] / win_loss_list[5]
if type_option=='current_season':
master_df['t1_wins_season'][mask] = win_loss_list[0]
master_df['t1_losses_season'][mask] = win_loss_list[1]
master_df['t1_matches_season'][mask] = win_loss_list[2]
master_df['t1_win_percent_season'][mask] = t1_win_percent
master_df['t2_wins_season'][mask] = win_loss_list[3]
master_df['t2_losses_season'][mask] = win_loss_list[4]
master_df['t2_matches_season'][mask] = win_loss_list[5]
master_df['t2_win_percent_season'][mask] = t2_win_percent
if type_option=='all_time':
master_df['t1_wins_alltime'][mask] = win_loss_list[0]
master_df['t1_losses_alltime'][mask] = win_loss_list[1]
master_df['t1_matches_alltime'][mask] = win_loss_list[2]
master_df['t1_win_percent_alltime'][mask] = t1_win_percent
master_df['t2_wins_alltime'][mask] = win_loss_list[3]
master_df['t2_losses_alltime'][mask] = win_loss_list[4]
master_df['t2_matches_alltime'][mask] = win_loss_list[5]
master_df['t2_win_percent_alltime'][mask] = t2_win_percent
return(master_df)
#Input: master_df, number of matches we want to look at
#Output: master_df updated with results filled in
def find_last_n_results(master_df, n):
id_list = master_df.id.unique()
for i in id_list:
t1 = ((master_df[master_df['id'] == i]).team_one).iloc[0]
t2 = ((master_df[master_df['id'] == i]).team_two).iloc[0]
date = ((master_df[master_df['id'] == i]).date).iloc[0]
teams=[t1, t2]
win_loss_list=[]
for team in teams:
df_partial_season=master_df[master_df['date']<date]
season_1 = df_partial_season[df_partial_season['team_one'] == team]
season_2 = df_partial_season[df_partial_season['team_two'] == team]
#We need to combine season 1 and season 2 and sort by date
team_df = pd.concat([season_1, season_2])
#print(f"season_1: {season_1.shape} season_2: {season_2.shape} team_df: {team_df.shape}")
team_df.sort_values(by=['date'], inplace=True, ascending=False)
#display(team_df.head)
top_n_team_df = team_df.head(n)
wins = len(top_n_team_df[top_n_team_df['winner'] == team])
losses = len(top_n_team_df[top_n_team_df['winner'] != team])
matches = wins+losses
#print(f"{team} {len(season_1)} {len(season_2)} {len(team_season)} {wins} ")
if matches >= n:
win_loss_list.append(wins)
win_loss_list.append(losses)
win_loss_list.append(matches)
else:
win_loss_list.append("")
win_loss_list.append("")
win_loss_list.append("")
#print(return_info)
mask = master_df['id'] == i
#calculate the win percentages for each match
if win_loss_list[2] == 0:
t1_win_percent = 0
elif win_loss_list[2] == "":
t1_win_percent = ""
else:
t1_win_percent = win_loss_list[0] / win_loss_list[2]
if win_loss_list[5] == 0:
t2_win_percent = 0
elif win_loss_list[5] == "":
t2_win_percent = ""
else:
t2_win_percent = win_loss_list[3] / win_loss_list[5]
if n == 3:
master_df['t1_wins_last_3'][mask] = win_loss_list[0]
master_df['t1_losses_last_3'][mask] = win_loss_list[1]
master_df['t1_win_percent_last_3'][mask] = t1_win_percent
master_df['t2_wins_last_3'][mask] = win_loss_list[3]
master_df['t2_losses_last_3'][mask] = win_loss_list[4]
master_df['t2_win_percent_last_3'][mask] = t2_win_percent
if n == 5:
master_df['t1_wins_last_5'][mask] = win_loss_list[0]
master_df['t1_losses_last_5'][mask] = win_loss_list[1]
master_df['t1_win_percent_last_5'][mask] = t1_win_percent
master_df['t2_wins_last_5'][mask] = win_loss_list[3]
master_df['t2_losses_last_5'][mask] = win_loss_list[4]
master_df['t2_win_percent_last_5'][mask] = t2_win_percent
elif n == 10:
master_df['t1_wins_last_10'][mask] = win_loss_list[0]
master_df['t1_losses_last_10'][mask] = win_loss_list[1]
master_df['t1_win_percent_last_10'][mask] = t1_win_percent
master_df['t2_wins_last_10'][mask] = win_loss_list[3]
master_df['t2_losses_last_10'][mask] = win_loss_list[4]
master_df['t2_win_percent_last_10'][mask] = t2_win_percent
elif n == 20:
master_df['t1_wins_last_20'][mask] = win_loss_list[0]
master_df['t1_losses_last_20'][mask] = win_loss_list[1]
master_df['t1_win_percent_last_20'][mask] = t1_win_percent
master_df['t2_wins_last_20'][mask] = win_loss_list[3]
master_df['t2_losses_last_20'][mask] = win_loss_list[4]
master_df['t2_win_percent_last_20'][mask] = t2_win_percent
return master_df
#Input: American Odds, and Probability of a Winning Bet
#Output: Bet EV based on a $100 bet
def get_bet_ev(odds, prob):
if odds>0:
return ((odds * prob) - (100 * (1-prob)) )
else:
return ((100 / abs(odds))*100*prob - (100 * (1-prob)))
#Input: American Odds
#Output: Profit on a successful bet
def get_bet_return(odds):
if odds>0:
return odds
else:
return (100 / abs(odds))*100
#Input DF must have these columns:
#t1_odds (American)
#t2_odds (American)
#t1_prob (0->1)
#t2_prob (0->1)
#winner (0 or 1)
#OUTPUT: Profit per bet (based on bet of $100)
def get_ev_from_df(ev_df, print_stats = False, min_ev = 0, get_total = True):
num_matches = 0
num_bets = 0
num_wins = 0
num_losses= 0
num_under= 0
num_under_losses = 0
num_under_wins = 0
num_even = 0
num_even_losses = 0
num_even_wins = 0
num_fav = 0
num_fav_wins = 0
num_fav_losses = 0
profit = 0
profit_per_bet = 0
profit_per_match = 0
for index, row in ev_df.iterrows():
num_matches = num_matches+1
t1_bet_ev = get_bet_ev(row['t1_odds'], row['t1_prob'])
#print(f"ODDS:{row['t1_odds']} PROB: {row['t1_prob']} EV: {t1_bet_ev}")
t2_bet_ev = get_bet_ev(row['t2_odds'], row['t2_prob'])
#print(f"ODDS:{row['t2_odds']} PROB: {row['t2_prob']} EV: {t2_bet_ev}")
#print()
t1_bet_return = get_bet_return(row['t1_odds'])
t2_bet_return = get_bet_return(row['t2_odds'])
if (t1_bet_ev > min_ev or t2_bet_ev > min_ev):
num_bets = num_bets+1
if t1_bet_ev > min_ev:
if row['winner'] == 0:
num_wins += 1
profit = profit + t1_bet_return
#print(t1_bet_return)
elif row['winner'] == 1:
num_losses += 1
profit = profit - 100
if (t1_bet_return > t2_bet_return):
num_under += 1
if row['winner'] == 0:
num_under_wins += 1
elif row['winner'] == 1:
num_under_losses += 1
elif (t1_bet_return < t2_bet_return):
num_fav += 1
if row['winner'] == 0:
num_fav_wins += 1
elif row['winner'] == 1:
num_fav_losses += 1
else:
num_even += 1
if row['winner'] == 0:
num_even_wins += 1
elif row['winner'] == 1:
num_even_losses += 1
if t2_bet_ev > min_ev:
if row['winner'] == 1:
num_wins += 1
profit = profit + t2_bet_return
elif row['winner'] == 0:
num_losses += 1
profit = profit - 100
if (t2_bet_return > t1_bet_return):
num_under += 1
if row['winner'] == 1:
num_under_wins += 1
elif row['winner'] == 0:
num_under_losses += 1
elif (t2_bet_return < t1_bet_return):
num_fav += 1
if row['winner'] == 1:
num_fav_wins += 1
elif row['winner'] == 0:
num_fav_losses += 1
else:
num_even += 1
if row['winner'] == 1:
num_even_wins += 1
elif row['winner'] == 0:
num_even_losses += 1
if num_bets > 0:
profit_per_bet = profit / num_bets
else:
profit_per_bet = 0
if num_matches > 0:
profit_per_match = profit / num_matches
else:
profit_per_match = 0
if print_stats:
print(f"""
Number of matches: {num_matches}
Number of bets: {num_bets}
Number of winning bets: {num_wins}
Number of losing bets: {num_losses}
Number of underdog bets: {num_under}
Number of underdog wins: {num_under_wins}
Number of underdog losses: {num_under_losses}
Number of Favorite bets: {num_fav}
Number of favorite wins: {num_fav_wins}
Number of favorite losses: {num_fav_losses}
Number of even bets: {num_even}
Number of even wins: {num_even_wins}
Number of even losses: {num_even_losses}
Profit: {profit}
Profit per bet: {profit_per_bet}
Profit per match: {profit_per_match}
""")
if (get_total):
#print(f"# Matches: {num_matches}, # Bets: {num_bets} # Wins: {num_wins}")
return(profit)
else:
return (profit_per_bet)
#Input the train df and model and we will return a customer 5x cross validation score based off of expected value
#t1_odds and t2_odd MUST be the last 2 columns or this will break.
def custom_cv_eval(df, m, labels, odds, min_ev=0, verbose=False, get_total=True):
X = np.array(df)
y = np.array(labels)
odds = np.array(odds)
running_total = 0
count=1
kf = KFold(n_splits=5, shuffle=True, random_state=75)
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
odds_train, odds_test = odds[train_index], odds[test_index]
#display(y_train)
m.fit(X_train, y_train)
probs=m.predict_proba(X_test)
#print(probs)
#We need to prep the dataframe to evaluate....
#X_odds = X_test[['t1_odds', 't2_odds']]
#print(X_test)
#print(X_test[:, -1])
#print(X_test[:, -2])
X_odds = list(zip(odds_test[:, -2], odds_test[:, -1], probs[:, 0], probs[:, 1], y_test))
ev_prepped_df = pd.DataFrame(X_odds, columns=['t1_odds', 't2_odds', 't1_prob', 't2_prob', 'winner'])
#display(ev_prepped_df)
#display(temp_df)
#print(f"{count}: {get_ev_from_df(ev_prepped_df, print_stats = False)}")
count=count+1
running_total = running_total + get_ev_from_df(ev_prepped_df, print_stats = verbose, min_ev = min_ev, get_total=get_total)
#display(ev_prepped_df)
return running_total
def get_best_features(features, model, df, current_features, scale=False):
best_feature = ""
winner_labels = df['winner_label'].copy()
initial_df = df[current_features]
#display(initial_df)
#display(winner_labels)
best_score = custom_cv_eval(df[current_features], model)
best_feature = ""
print(f"Current best score is: {best_score}")
for f in features:
if f not in current_features:
new_features = [f] + current_features
df_sel=df[new_features]
if scale == True:
sc = StandardScaler()
df_sel = sc.fit_transform(df_sel)
new_score = custom_cv_eval(df_sel, model)
#print(f"Total score for {f} is: {new_score}")
if new_score > best_score:
best_score = new_score
best_feature = f
#print()
#Keep running until we don't improve
if best_feature != "":
print(f"The best feature was {best_feature}. It scored {best_score}")
current_features = [best_feature] + current_features
return(get_best_features(features, model, df, current_features, scale))
else:
print("NO IMPROVEMENT")
print(f"FINAL BEST SCORE: {best_score}")
return current_features
def get_best_features_v2(pos_features, m, df, cur_features, labels, odds, scale=False, min_ev=0):
best_feature = ''
#If there are no current features...
if len(cur_features) == 0:
best_score = -100
else:
df_sel = df[cur_features]
df_sel = df_sel.dropna()
df_sel = pd.get_dummies(df_sel)
#OK we need to filter the labels and odds based off of the indices
labels_sel = labels[labels.index.isin(df_sel.index)]
odds_sel = odds[odds.index.isin(df_sel.index)]
best_score = custom_cv_eval(df_sel, m, labels_sel, odds_sel, min_ev=min_ev, get_total=True)
best_feature = ""
print(f"Current best score is: {best_score}")
#Go thru every feature and test it...
for f in pos_features:
#If f is not a current feature
if f not in cur_features:
new_features = [f] + cur_features
df_sel = df[new_features]
df_sel = df_sel.dropna()
df_sel = pd.get_dummies(df_sel)
#display(df_sel)
#OK we need to filter the labels and odds based off of the indices
labels_sel = labels[labels.index.isin(df_sel.index)]
odds_sel = odds[odds.index.isin(df_sel.index)]
new_score = custom_cv_eval(df_sel, m, labels_sel, odds_sel, min_ev=min_ev, get_total=True)
#print(f"{len(df_sel)} {len(labels_sel)} {len(odds_sel)}")
if new_score > best_score:
print(f"Feature: {f} Score: {new_score}")
best_score = new_score
best_feature = f
if best_feature != "":
print(f"The best feature was {best_feature}. It scored {best_score}")
cur_features = [best_feature] + cur_features
#Keep running until we don't improve
return(get_best_features_v2(pos_features, m, df, cur_features, labels, odds, scale, min_ev=min_ev))
else:
print("NO IMPROVEMENT")
print(f"FINAL BEST SCORE: {best_score}")
return cur_features
return []
def get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev = 0, verbose=False, get_total=True):
df_sel = input_df[input_features]
df_sel = df_sel.dropna()
df_sel = pd.get_dummies(df_sel)
labels_sel = input_labels[input_labels.index.isin(df_sel.index)]
odds_sel = odds_input[odds_input.index.isin(df_sel.index)]
best_score = custom_cv_eval(df_sel, input_model, labels_sel, odds_sel, min_ev = min_ev, verbose=verbose,
get_total=get_total)
return best_score
def tune_LogisticRegression(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. penalty ('l1' or 'l2')
#2. tol (original_value, original_value * 1.2, original_value * 0.8, rand(0, 10)
#3. random_state = 75
#4. solver = 'newton-cg', 'lbfgs', 'sag', 'saga'
###############################################################################################################
print()
print()
print("Starting New Run for LogisticRegression")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
penalty = ['l1', 'l2', 'none']
solver = ['newton-cg', 'lbfgs', 'sag', 'saga']
tol = [input_model.tol, input_model.tol * 1.2, input_model.tol * .8, random.random() * 10 ]
for s in solver:
score = -10000
for p in penalty:
for t in tol:
if ((s == 'newton-cg') & (p == 'l1')) |\
((s == 'lbfgs') & (p == 'l1')) |\
((s == 'sag') & (p == 'l1')):
pass
else:
test_model = LogisticRegression(solver = s, penalty = p, tol=t, random_state=75, max_iter=50000)
score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("solver:", s,
"penalty:", p,
"tol:", t,
"Best Score:", best_score)
print()
print()
else:
pass
print("solver:", s,
"penalty:", p,
"tol:", t,
"Score:", score)
return(output_model)
def tune_DecisionTreeClassifier(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. criterion ('gini', 'entropy')
#2. splitter ('random', 'best')
#3. max_depth ('none', IF A NUMBER EXISTS +1, -1, random, else 2 RANDOM INTS 1->100)
#4. min_samples_leaf(n-1, 0, n+1)
#5. max_leaf_nodes:('none', n+1, n-1, OR 4 random numbers)
###############################################################################################################
print()
print()
print("Starting New Run for DecisionTree")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
criterion = ['gini', 'entropy']
splitter = ['random', 'best']
if input_model.max_depth == None:
max_depth = [None, random.randrange(100), random.randrange(100)]
else:
max_depth = [input_model.max_depth, input_model.max_depth - 1, input_model.max_depth + 1, random.randrange(100)]
max_depth = [i for i in max_depth if i > 0]
min_samples_leaf = [input_model.min_samples_leaf, input_model.min_samples_leaf - 1,
input_model.min_samples_leaf + 1, random.randrange(100)]
min_samples_leaf = [i for i in min_samples_leaf if i > 0]
if input_model.max_leaf_nodes == None:
max_leaf_nodes = [None, random.randrange(1000), random.randrange(1000)]
else:
max_leaf_nodes = [input_model.max_leaf_nodes, input_model.max_leaf_nodes - 1,
input_model.max_leaf_nodes + 1, random.randrange(1000)]
max_leaf_nodes = [i for i in max_leaf_nodes if i > 0]
for l in max_leaf_nodes:
for sam in min_samples_leaf:
for m in max_depth:
for c in criterion:
for s in splitter:
test_model = DecisionTreeClassifier(criterion = c, splitter = s, max_depth = m,
min_samples_leaf=sam, max_leaf_nodes = l, random_state=75)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("Criterion:", c, "splitter:", s, "max_depth:", m,
"min_samples_leaf:", sam, "max_leaf_nodes:", l, best_score)
print()
else:
pass
print("Criterion:", c, "splitter:", s, "max_depth:", m,
"min_samples_leaf:", sam, "max_leaf_nodes:", l, score)
return output_model
def tune_RandomForestClassifier(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. criterion ('gini', 'entropy')
#2. max_features ('auto', 'sqrt', 'log2')
#3. max_depth ('none', IF A NUMBER EXISTS +2, -2, ELSE 2 RANDOM INTS 1->100)
#4. min_samples_leaf(n-2, 0, n+2)
#5. max_leaf_nodes:('none', n+2, n-2, OR 2 random numbers)
#6. n_estimators: (n, n+2, n-2)
###############################################################################################################
print()
print()
print("Starting New Run for RandomForestClassifier")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
#1. criterion ('gini', 'entropy')
criterion = ['gini', 'entropy']
#2. max_features ('auto', 'log2')
max_features = ['auto', 'log2', None]
#3. max_depth ('none', IF A NUMBER EXISTS +2, +4, -2, -4 ELSE 4 RANDOM INTS 1->100)
if input_model.max_depth == None:
max_depth = [None, random.randrange(100), random.randrange(100)]
else:
max_depth = [input_model.max_depth, input_model.max_depth - 2,
input_model.max_depth + 2, random.randrange(100)]
max_depth = [i for i in max_depth if i > 0]
#4. min_samples_leaf(n-1, n-2, 0, n+1, n+2)
min_samples_leaf = [input_model.min_samples_leaf, input_model.min_samples_leaf - 2,
input_model.min_samples_leaf + 2, random.randrange(100)]
min_samples_leaf = [i for i in min_samples_leaf if i > 0]
#5. max_leaf_nodes:('none', n+1, n+2, n-1, n-2, OR 4 random numbers)
if input_model.max_leaf_nodes == None:
max_leaf_nodes = [None, random.randrange(1000), random.randrange(1000)]
else:
max_leaf_nodes = [input_model.max_leaf_nodes, input_model.max_leaf_nodes - 2,
input_model.max_leaf_nodes + 2, random.randrange(1000)]
max_leaf_nodes = [i for i in max_leaf_nodes if i > 0]
n_estimators = [input_model.n_estimators, input_model.n_estimators - 2,
input_model.n_estimators + 2, random.randrange(200)]
n_estimators = [i for i in n_estimators if i > 0]
for n in n_estimators:
for ml in max_leaf_nodes:
for ms in min_samples_leaf:
for md in max_depth:
for mf in max_features:
for c in criterion:
test_model = RandomForestClassifier(n_estimators = n, max_leaf_nodes = ml,
min_samples_leaf = ms,
max_depth = md, criterion = c,
max_features = mf,
n_jobs = -1,
random_state=75)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("Criterion:", c, "max_features:", mf, "max_depth:", md, "min_samples_leaf:", ms,
"max_leaf_nodes:", ml, "n_estimators", n, best_score)
print()
print()
else:
pass
print("Criterion:", c, "max_features:", mf, "max_depth:", md, "min_samples_leaf:", ms,
"max_leaf_nodes:", ml, "n_estimators", n, score)
return output_model
def tune_GradientBoostingClassifier(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. criterion ('friedman_mse', 'mse', 'mae')
#2. loss ('deviance', 'exponential')
#3. n_estimators (n, n+1, n-1)
#4. learning_rate (learning_rate, learning_rate *1.1, learning_rate*.9)
#5. min_samples_leaf: (n, n-1, n+1)
#6. max_depth: (n, n+1, n-1)
#7. max_features: (None, 'auto', 'sqrt', 'log2')
#8. max_leaf_nodes: (None, n+1, n-1, OR 2 random numbers)
#9. tol (n, n*1.1, n*.9)
###############################################################################################################
print()
print()
print("Starting New Run")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
#1. criterion ('friedman_mse', 'mse', 'mae')
criterion = ['friedman_mse']
#2. loss ('deviance', 'exponential')
loss = ['deviance']
#3. n_estimators (n, n+1, n-1)
n_estimators = [input_model.n_estimators, input_model.n_estimators - 1, input_model.n_estimators + 1,
random.randrange(200)]
n_estimators = [i for i in n_estimators if i > 0]
#4. learning_rate (learning_rate, learning_rate *1.1, learning_rate*.9)
learning_rate = [input_model.learning_rate]
#5. min_samples_leaf: (n, n-1, n+1)
min_samples_leaf = [input_model.min_samples_leaf, input_model.min_samples_leaf - 1,
input_model.min_samples_leaf + 1]
min_samples_leaf = [i for i in min_samples_leaf if i > 0]
#6. max_depth: (n, n+1, n-1)
if input_model.max_depth == None:
max_depth = [None, random.randrange(100), random.randrange(100)]
else:
max_depth = [input_model.max_depth, input_model.max_depth - 1,
input_model.max_depth + 1, random.randrange(100)]
max_depth = [i for i in max_depth if i > 0]
#7. max_features: (None, 'auto', 'sqrt', 'log2')
max_features = ['sqrt', 'log2', None]
#8. max_leaf_nodes: (None, n+1, n-1, OR 2 random numbers)
if input_model.max_leaf_nodes == None:
max_leaf_nodes = [None, random.randrange(1000), random.randrange(1000)]
else:
max_leaf_nodes = [input_model.max_leaf_nodes, input_model.max_leaf_nodes - 1, input_model.max_leaf_nodes + 1,
random.randrange(1000)]
max_leaf_nodes = [i for i in max_leaf_nodes if i > 0]
#9. tol (n, n*1.1, n*.9)
tol = [input_model.tol, input_model.tol * 1.2, input_model.tol * .8]
print(len(tol) * len(max_leaf_nodes) * len(max_features) * len(max_depth) * len(min_samples_leaf) * len(learning_rate) * len(n_estimators) * len(loss) * len(criterion))
for t in tol:
for ml in max_leaf_nodes:
for mf in max_features:
for md in max_depth:
for ms in min_samples_leaf:
for lr in learning_rate:
for n in n_estimators:
for l in loss:
for c in criterion:
test_model = GradientBoostingClassifier(n_estimators = n,
learning_rate = lr,
criterion = c,
min_samples_leaf = ms,
max_depth = md,
loss = l,
max_features = mf,
max_leaf_nodes = ml,
tol = t,
random_state=75)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("Criterion:", c,
"n_estimators:", n,
"Loss:", l,
"Learning Rate:", lr,
"Min Samples/Leaf:", ms,
"Max Depth:", md,
"Max Features:", mf,
"Max Leaf Nodes:", ml,
"tol:", t,
"Best Score:", best_score)
print()
print()
else:
pass
print("Criterion:", c,
"n_estimators:", n,
"Loss:", l,
"Learning Rate:", lr,
"Min Samples/Leaf:", ms,
"Max Depth:", md,
"Max Features:", mf,
"Max Leaf Nodes:", ml,
"tol:", t,
"Score:", score)
return(output_model)
def tune_GaussianNB(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. var_smoothing (1e-12, 1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6)
###############################################################################################################
print()
print()
print("Starting New Run for GaussianNB")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
var_smoothing = [1e-12, 1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6]
for v in var_smoothing:
test_model = GaussianNB(var_smoothing = v)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("var_smoothing:", v,
"Best Score:", best_score)
print()
print()
else:
pass
print("var_smoothing:", v,
"Score:", score)
return output_model
def tune_hyperparameters(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
best_model = input_model
keep_going = True
if isinstance(input_model, LogisticRegression):
while(keep_going):
pos_model = (tune_LogisticRegression(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, DecisionTreeClassifier):
while(keep_going):
pos_model = (tune_DecisionTreeClassifier(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, RandomForestClassifier):
while(keep_going):
pos_model = (tune_RandomForestClassifier(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, GradientBoostingClassifier):
print("HI")
while(keep_going):
pos_model = (tune_GradientBoostingClassifier(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, GaussianNB):
while(keep_going):
pos_model = (tune_GaussianNB(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
else:
output_model = input_model
return(output_model)
def tune_ev(input_model, input_features, input_df, input_labels, odds_input, verbose=False):
best_ev = 0
best_pos = -1
for temp_ev in range(200):
pos_ev = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=temp_ev, verbose=verbose,
get_total=True)
print(temp_ev, pos_ev)
if pos_ev > best_ev:
best_ev = pos_ev
best_pos = temp_ev
return best_pos
def remove_to_improve(cur_features, m, df, labels, odds, scale=False, min_ev = 0):
#If the list is empty we can just return it without doing anything
number_of_features = len(cur_features)
df_sel = df[cur_features]
df_sel = df_sel.dropna()
df_sel = pd.get_dummies(df_sel)
labels_sel = labels[labels.index.isin(df_sel.index)]
odds_sel = odds[odds.index.isin(df_sel.index)]
orig_score = custom_cv_eval(df_sel, m, labels_sel, odds_sel, get_total=True, min_ev = min_ev)
#print(orig_score)
best_features = cur_features
best_score = orig_score
print(f"The original score is {orig_score}")
if number_of_features == 0:
return []
for z in range(number_of_features):
temp_features = cur_features.copy()
#Remove a feature
del temp_features[z]
df_sel = df[temp_features]
df_sel = df_sel.dropna()
df_sel = | pd.get_dummies(df_sel) | pandas.get_dummies |
import logging
import math
import numpy as np
import pandas as pd
import os
import random
import sys
import unittest
from pathlib import Path
from logml.core import LogMl, MODEL_TYPE_CLASSIFICATION, MODEL_TYPE_REGRESSION
from logml.core.config import Config, CONFIG_CROSS_VALIDATION, CONFIG_DATASET, CONFIG_HYPER_PARAMETER_OPTMIMIZATION, CONFIG_LOGGER, CONFIG_MODEL
from logml.core.files import set_plots, MlFiles
from logml.core.log import MlLog
from logml.core.registry import MlRegistry, DATASET_AUGMENT, DATASET_CREATE, DATASET_INOUT, DATASET_PREPROCESS, DATASET_SPLIT, MODEL_CREATE, MODEL_EVALUATE, MODEL_PREDICT, MODEL_TRAIN
from logml.datasets import Datasets, DatasetsDf, DatasetsCv, DatasetsDf
from logml.datasets.df.category import CategoricalFieldPreprocessing
from logml.feature_importance.data_feature_importance import DataFeatureImportance
from logml.feature_importance.pvalue_fdr import LogisticRegressionWilks, MultipleLogisticRegressionWilks, PvalueLinear
from logml.models import Model
from logml.feature_importance import FeatureImportancePermutation
from logml.models.sklearn_model import ModelFactoryRandomForest
DEBUG = os.getenv('TEST_UNIT_DEBUG', False)
# DEBUG = True
def array_equal(a1, a2):
a1 = np.array(a1)
a2 = np.array(a2)
a1_nan_idx = np.isnan(a1)
a2_nan_idx = np.isnan(a2)
if not np.array_equal(a1_nan_idx, a2_nan_idx):
return False
return np.array_equal(a1[~a1_nan_idx], a2[~a2_nan_idx])
def is_close(x, y, epsilon=0.000001):
return abs(x - y) < epsilon
def is_sorted(x):
""" Is numpy array 'x' sorted? """
return np.all(x[:-1] <= x[1:])
def rm(file):
""" Delete file, if it exists """
if os.path.exists(file):
os.remove(file)
class TestLogMl(unittest.TestCase):
def setUp(self):
MlLog().set_log_level(logging.CRITICAL)
if DEBUG:
MlLog().set_log_level(logging.DEBUG)
set_plots(disable=True, show=False, save=False)
MlRegistry().reset()
def test_class_CategoricalFieldPreprocessing_001(self):
""" Test CategoricalFieldPreprocessing: Undefined categories """
xi = pd.Series(['small', 'mid', 'large', 'small', 'mid', 'large', 'small', 'mid', 'large', None])
cfp = CategoricalFieldPreprocessing('test_field', xi, categories=None, one_based=False, scale=False, strict=True, is_output=False, convert_to_missing=None)
cfp()
codes = cfp.codes.to_numpy()
# Check categorical values converted to codes
self.assertEqual(-1, codes.min(), f"Min codes: {codes.min()}")
self.assertEqual(2, codes.max(), f"Max codes: {codes.max()}")
self.assertEqual(np.int8, codes.dtype, f"Codes.dtype: {codes.dtype}")
# Check that undefined values are -1
self.assertEqual(-1, cfp.codes[9], f"Missing code: {cfp.codes[9]}")
def test_class_CategoricalFieldPreprocessing_002(self):
""" Test CategoricalFieldPreprocessing: Set categories """
xi = pd.Series(['small', 'mid', 'large', 'small', 'mid', 'large', 'small', 'mid', 'large', None])
cfp = CategoricalFieldPreprocessing('test_field', xi, categories=['small', 'mid', 'large'], one_based=False, scale=False, strict=True, is_output=False, convert_to_missing=None)
cfp()
codes = cfp.codes.to_numpy()
codes_expected = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, -1])
# Check categorical values converted to codes: zero-based
self.assertEqual(-1, cfp.codes.min(), f"Min codes: {cfp.codes.min()}")
self.assertEqual(2, cfp.codes.max(), f"Max codes: {cfp.codes.max()}")
self.assertEqual(np.int8, codes.dtype, f"Codes.dtype: {codes.dtype}")
self.assertTrue(np.array_equal(codes_expected, codes), f"Codes expected: {codes_expected}\n\tCodes: {codes}")
# Check that undefined values are -1
self.assertEqual(-1, cfp.codes[9], f"Missing code: {cfp.codes[9]}")
def test_class_CategoricalFieldPreprocessing_003(self):
""" Test CategoricalFieldPreprocessing: Set categories, one_based """
xi = pd.Series(['small', 'mid', 'large', 'small', 'mid', 'large', 'small', 'mid', 'large', None])
cfp = CategoricalFieldPreprocessing('test_field', xi, categories=['small', 'mid', 'large'], one_based=True, scale=False, strict=True, is_output=False, convert_to_missing=None)
cfp()
codes = cfp.codes.to_numpy()
# Check categorical values converted to codes: one-based
codes_expected = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3, 0])
self.assertEqual(0, cfp.codes.min(), f"Min codes: {cfp.codes.min()}")
self.assertEqual(3, cfp.codes.max(), f"Max codes: {cfp.codes.max()}")
self.assertTrue(np.array_equal(codes_expected, codes), f"Codes expected: {codes_expected}\n\tCodes: {codes}")
self.assertEqual(np.int8, codes.dtype, f"Codes.dtype: {codes.dtype}")
# Check that undefined values are 0
self.assertEqual(0, cfp.codes[9], f"Missing code: {cfp.codes[9]}")
def test_class_CategoricalFieldPreprocessing_004(self):
""" Test CategoricalFieldPreprocessing: Set categories, scale """
xi = | pd.Series(['small', 'mid', 'large', 'small', 'mid', 'large', 'small', 'mid', 'large', None]) | pandas.Series |
#################################################
#created the 27/04/2018 15:32 by <NAME>#
#################################################
#-*- coding: utf-8 -*-
'''
Possible improvements:
'''
import warnings
warnings.filterwarnings('ignore')
#################################################
########### Imports #################
#################################################
import os
import sys
import numpy as np
import pandas as pd
#################################################
########### Global variables ####################
#################################################
IRRELEVANT = ['@DATEMODIF','@CLEDIF','DATEHEURE','RATIO','HEURE','@CLEEMI']
#################################################
########### Important functions #################
#################################################
def updateargs(CHAINE,DATE):
    '''
    Join the date and the channel in different ways to match
    the way files are named
    IN: the two arguments passed to the program as strings
    OUT: two other strings that are modifications of the entries
    '''
    JOINDATE = "".join(DATE.split('-'))
    # left-pad the channel number with zeros so it is always 4 characters long
    CHAINE2 = CHAINE.zfill(4)
    return(JOINDATE, CHAINE2)
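# For example (illustrative values, not taken from the project's real data):
# >>> updateargs("118", "2018-04-27")
# ('20180427', '0118')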
def dfjoin(df,tdf):
    '''
    Create a joined subset of RTS and PTV on which we will join the two files
    IN: two subsets of the RTS and PTV files
    OUT: a joined dataframe in which we have infos from both PTV and RTS
    '''
    for index, row in df.iterrows():
        for index2, row2 in tdf.iterrows():
            if(int(row2['debut']) < int(row['minutes']%1440) < int(row2['fin'])):
                df.loc[index, 'isinprogramme'] = 1
                df.loc[index, 'fin'] = row2['fin']
                df.loc[index, 'debut'] = int(row2['debut'])
                df.loc[index, 'TITRE'] = row2['titre']
            if(int(row2['debut']) < int(row['minutes']) < int(row2['fin'])):
                df.loc[index, 'isinprogramme'] = 1
                df.loc[index, 'fin'] = row2['fin']
                df.loc[index, 'debut'] = int(row2['debut'])
                df.loc[index, 'TITRE'] = row2['titre']
    # rows that never matched a programme keep a default title
    df.loc[df['isinprogramme'] == 0, 'TITRE'] = 'en dehors de programmes'
return df
def get_features_from_join(df):
    '''
    Extraction of features based on the merge of the two files RTS and PTV
    IN: DataFrame of the joined files
    OUT: modified dataframe with the derived features
    '''
    df['temps depuis debut'] = 0
    df['temps avant fin'] = 0
    df['pourcentage déjà vu'] = 0
    df['temps depuis debut'] = df[df['isinprogramme'] == 1].apply(lambda row: min(abs(row['minutes'] - row['debut']),abs(row['minutes']%1440 - row['debut'])),axis = 1)
    df['temps avant fin'] = df[df['isinprogramme'] == 1].apply(lambda row: min(abs(row['fin'] - row['minutes']),abs(row['fin'] - row['minutes']%1440)),axis = 1)
    df = df.fillna(1)
    df['pourcentage déjà vu'] = df['temps depuis debut']/df['DUREE']
    return df
def processing(X_RTS,X_PTV):
'''
    Process the two data files to get a joined DataFrame with cross infos
    IN: the two DataFrames of RTS and PTV
OUT: a DataFrame of all the valuable infos
'''
X_RTS['minutes'] = X_RTS['minutes']+180
# Creating temp DataFrame to make the join possible
tdf = pd.DataFrame()
tdf['debut'] = X_PTV['debut']
tdf['fin'] = tdf['debut']+X_PTV['DUREE']
tdf['titre'] = X_PTV['TITRE']
# creating the final dataframe and fill it with values from both Dataframe
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Investopedia's S&P 500 Top Performers
# <div class="alert alert-block alert-success">
# <b>Note:</b> The next lines of code link to the webpage from which we will scrape our information.
# </div>
# <div>
# <ul id="journey-nav__sublist_1-0" class="comp journey-nav__sublist .js-animation">
# <li class="journey-nav__sublist-item journey-nav__sublist-item-overview">
# <a href="https://www.investopedia.com/top-stocks-4581225">Overview</a>
# </li>
# <li class="journey-nav__sublist-item is-active">
# <a href="https://www.investopedia.com/top-communications-stocks-4583180">Top Communications Stocks</a>
# </li>
# <li class="journey-nav__sublist-item ">
# <a href="https://www.investopedia.com/investing/consumer-cyclical-stocks/">Top Consumer Discretionary Stocks</a>
# </li>
# <li class="journey-nav__sublist-item ">
# <a href="https://www.investopedia.com/investing/consumer-defensive-stocks/">Top Consumer Staples Stocks</a>
# </li>
# <li class="journey-nav__sublist-item ">
# <a href="https://www.investopedia.com/top-energy-stocks-4582081">Top Energy Stocks</a>
# </li>
# <li class="journey-nav__sublist-item ">
# <a href="https://www.investopedia.com/top-financial-stocks-4582168">Top Financial Stocks</a>
# </li>
# <li class="journey-nav__sublist-item ">
# <a href="https://www.investopedia.com/investing/top-healthcare-stocks/">Top Healthcare Stocks</a>
# </li>
# <li class="journey-nav__sublist-item ">
# <a href="https://www.investopedia.com/top-industrial-stocks-4582171">Top Industrial Stocks</a>
# </li>
# <li class="journey-nav__sublist-item ">
# <a href="https://www.investopedia.com/top-materials-stocks-4582152">Top Materials Stocks</a>
# </li>
# <li class="journey-nav__sublist-item ">
# <a href="https://www.investopedia.com/top-reits-4582128">Top Real Estate Stocks</a>
# </li>
# <li class="journey-nav__sublist-item ">
# <a href="https://www.investopedia.com/top-tech-stocks-4581295">Top Technology Stocks</a>
# </li>
# <li class="journey-nav__sublist-item ">
# <a href="https://www.investopedia.com/top-utilities-stocks-4582243">Top Utilities Stocks</a>
# </li>
# </ul>
# </div>
#
#
# ## Step 1: Install Python packages
import os
import pandas as pd
import html5lib
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from datetime import date, timedelta, datetime as dt
from pymongo import MongoClient
# ## Step 2: Preparation
# <div class="alert alert-block alert-info">
# <b>Note:</b> The next lines of code is the connection to a mongo databaste which will be seperated into different collections through this document.
# </div>
#
# <div class="alert alert-block alert-warning">
# <b>Note:</b> The next lines of code is the connection to a mongo databaste which will be seperated into different collections through this document.
# </div>
# <div class="alert alert-block alert-success">
# <b>Note:</b> The next lines of code is the connection to a mongo databaste which will be seperated into different collections through this document.
# </div>
# Connect to MongoDB
client = MongoClient("mongodb://localhost:27017")
db = client['investopedia']
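# A minimal sketch of how the ticker lists scraped below could be written to this database.
# The collection name "top_performers" and the document shape are assumptions for
# illustration, not part of the original notebook:
def save_ticks(sector, category, ticks):
    # one document per (sector, category) scrape, e.g. ("communications", "best_value", [...])
    db["top_performers"].insert_one({
        "sector": sector,
        "category": category,
        "ticks": ticks,
        "scraped_on": dt.now().isoformat()
    })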
# <div class="alert alert-block alert-danger">
# <b>Warning:</b> The next lines of code are only usable with the Chrome driver profile that points to my user directory.
# </div>
#4]:
# class RemoteDriverStartService():
# options = webdriver.ChromeOptions()
# # Set user app data to a new directory
# options.add_argument("user-data-dir=C:\\Users\\Donley\\App Data\\Google\\Chrome\\Application\\User Data\\Kit")
# options.add_experimental_option("Proxy", "null")
# options.add_experimental_option("excludeSwitches", ["ignore-certificate-errors"])
# # Create a download path for external data sources as default:
# options.add_experimental_option("prefs", {
# "download.default_directory": r"C:\Users\Donley\Documents\GA_TECH\SUBMISSIONS\PROJECT2-CHALLENGE\data\external",
# "download.prompt_for_download": False,
# "download.directory_upgrade": True,
# "safebrowsing.enabled": True
# }),
# # Add those optional features to capabilities
# caps = options.to_capabilities()
# def start_driver(self):
# return webdriver.Remote(command_executor='http://127.0.0.1:4444',
# desired_capabilities=self.caps)
# # Set class equal to new capabilities:
# DesiredCapabilities = RemoteDriverStartService()
#7]:
def investopediaScrape():
# Create variables for scraping:
investopedia = "https://www.investopedia.com/top-communications-stocks-4583180"
# Download data to paths, csv's, json, etc:
# for external data sources
external = "../data/external/"
# for processed data sources with ID's
processed = "../data/processed/"
# current_path = os.getcwd()
# Path = os.path.join(current_path, "geckodriver.exe")
driver = webdriver.Firefox(executable_path= "geckodriver.exe")
driver.get(investopedia)
driver.maximize_window()
timeout = 20
# Find an ID on the page and wait before executing anything until found:
try:
WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.ID, "main_1-0")))
    except TimeoutException:
        driver.quit()
        return None
    # hand the driver back so the cells below can keep using the same browser session
    return driver
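# One way to obtain the module-level `driver` that the cells below rely on (an assumption,
# since the original notebook does not show this call):
# driver = investopediaScrape()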
#10]:
# # Locate Driver in system
# current_path = os.getcwd()
# # save the .exe file under the same directory of the web-scrape python script.
# Path = os.path.join(current_path, "chromedriver")
# # Initialize Chrome driver and start browser session controlled by automated test software under Kit profile.
# caps = webdriver.DesiredCapabilities.CHROME.copy()
# caps['acceptInsecureCerts'] = True
# # caps = webdriver.DesiredCapabilities.CHROME.copy()
# # caps['acceptInsecureCerts'] = True
# # driver = webdriver.Chrome(options=options, desired_capabilities=caps)
# driver = webdriver.Chrome(executable_path='chromedriver', desired_capabilities=caps)
# ## Step 3: Find the IDs of the items we want to scrape for
#11]:
# Start Grabbing Information from investopedia:
# ## Step 4: Techniques to make more human-like web-scrapers
#12]:
# If the website detects us as a web-scraper, it will cut our connection so we cannot pull more data and have to re-start our scraper. This largely impacts the efficiency of the scraper and involves a lot of manual interference. There are a few techniques we can use to make the scraper more human-like:
# (1) Randomize the sleep time
# This can be easily implemented as below wherever needed:
# #sleep for sometime between 5 and 8 seconds
# time.sleep(random.uniform(5,8))
# (2) Randomize the user agent for the web browser
# This is also easy and can be added to the browser options as below:
# ua = UserAgent()
# userAgent = ua.random
# Firefox_options = webdriver.FirefoxOptions()
# Firefox_options.add_argument(f"user-agent={userAgent}")
# browser = webdriver.Firefox(executable_path = DRIVER_BIN, options=Firefox_options)
# (3) Use dynamic proxy/IP
# This requires more work than the above two. Usually free proxies are not stable and most of them don't respond to requests, so we first need to find a free proxy that does respond. This website (also named as "url" in the script below) provides a lot of free proxies which we scrape down for our use. We will use the Python BeautifulSoup package to scrape a list of proxies, and the Python requests package to test whether the proxy responds to our requests to the link.
# def get_proxy(link):
# url = "https://www.sslproxies.org/"
# r = requests.get(url)
# soup = BeautifulSoup(r.content, 'html5lib')
# proxies_list = list(map(lambda x: x[0]+':'+x[1], list(zip(map(lambda x: x.text, soup.findAll('td')[::8]), map(lambda x: x.text, soup.findAll('td')[1::8])))))
# while 1:
# try:
# selected_ip = choice(proxies_list)
# proxy = {'https': selected_ip, 'http': selected_ip}
# headers = {'User-Agent': ua.random}
# print('Using proxy:{}'.format(proxy))
# r = requests.request('get', link, proxies=proxy, headers=headers, timeout=5)
# break
# except:
# pass
# return proxy
# We then add the working proxy to the browser option, similar to how we added the fake user agent:
# link = "https://www.expedia.com"
# proxy = get_proxy(link)
# Firefox_options.add_argument('--proxy-server=%s' % proxy)
# browser = webdriver.Firefox(executable_path = DRIVER_BIN, options=Firefox_options)
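# A minimal, runnable sketch of points (1) and (2) above using the requests library
# (assumptions: the fake_useragent and requests packages are installed; the function
# name polite_get is illustrative and not part of the original notebook):
import time
import random
import requests
from fake_useragent import UserAgent
def polite_get(url):
    # sleep for a random amount of time between 5 and 8 seconds before each request
    time.sleep(random.uniform(5, 8))
    # send the request with a randomized User-Agent header
    headers = {'User-Agent': UserAgent().random}
    return requests.get(url, headers=headers, timeout=10)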
#19]:
top_sector_stocks = driver.find_element_by_id("journey-nav__sublist_1-0").get_attribute('innerHTML')
top_sector_stocks
#20]:
soup = bs(top_sector_stocks, 'lxml')
#25]:
top_sector_links = [link.get('href') for link in soup.find_all('a')]
top_sector_links
#26]:
top_sectors = ["Top Communications Stocks", "Top Consumer Discretionary Stocks", "Top Consumer Staples Stocks", "Top Energy Stocks", "Top Financial Stocks", "Top Healthcare Stocks",
"Top Industrial Stocks", "Top Materials Stocks", "Top Real Estate Stocks", "Top Technology Stocks", "Top Utilities Stocks"]
#27]:
# Find all links to use driver.get() and pull all tables:
top_communications = [links.get_attribute("href") for links in driver.find_elements_by_link_text(top_sectors[0])]
top_discretionary = [links.get_attribute("href") for links in driver.find_elements_by_link_text(top_sectors[1])]
top_staples = [links.get_attribute("href") for links in driver.find_elements_by_link_text(top_sectors[2])]
top_energy = [links.get_attribute("href") for links in driver.find_elements_by_link_text(top_sectors[3])]
top_financial = [links.get_attribute("href") for links in driver.find_elements_by_link_text(top_sectors[4])]
top_healthcare = [links.get_attribute("href") for links in driver.find_elements_by_link_text(top_sectors[5])]
top_industrial = [links.get_attribute("href") for links in driver.find_elements_by_link_text(top_sectors[6])]
top_materials = [links.get_attribute("href") for links in driver.find_elements_by_link_text(top_sectors[7])]
top_real = [links.get_attribute("href") for links in driver.find_elements_by_link_text(top_sectors[8])]
top_technology = [links.get_attribute("href") for links in driver.find_elements_by_link_text(top_sectors[9])]
top_utilities = [links.get_attribute("href") for links in driver.find_elements_by_link_text(top_sectors[10])]
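# The eleven nearly identical assignments above could also be collapsed into a single
# dictionary keyed by sector name (a stylistic alternative using the same Selenium calls):
sector_links = {name: [a.get_attribute("href") for a in driver.find_elements_by_link_text(name)]
                for name in top_sectors}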
# ]:
# ]:
# ## Step 5: The full code that runs the scraper and saves the data to .csv files
#
#28]:
driver.get(top_communications[0])
#29]:
itable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
itables = pd.read_html(itable)
communications_bv = itables[0]
communications_bv.columns = ["Communictaions Best Value", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
communications_bv
# Locate column containing ticker symbols:
communications_bv_df = communications_bv.iloc[1:]
# Only keep tick information within parentheses:
communications_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in communications_bv_df["Communications Best Value"]]
communications_bv_ticks
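# As a side note, the same parenthesised-ticker extraction can be written with a regular
# expression (the helper name extract_tick is illustrative, not from the original notebook):
import re
def extract_tick(name):
    # pull the text between the first pair of parentheses, e.g. "Walt Disney Co. (DIS)" -> "DIS"
    return re.search(r"\(([^)]+)\)", name).group(1)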
#30]:
communications_fg = itables[1]
communications_fg.columns = ["Communications Fastest Growing", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
communications_fg_df = communications_fg.iloc[1:]
communications_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in communications_fg_df["Communications Fastest Growing"]]
communications_fg_ticks
#31]:
communications_mm = itables[2]
communications_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
communications_mm_df = communications_mm.iloc[1:]
communications_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in communications_mm_df["Communications Most Momentum"]]
del communications_mm_ticks[-2:]
communications_mm_ticks
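# Every sector below repeats the same three-table pattern (best value / fastest growing /
# most momentum). A sketch of a helper that captures that pattern and also saves each table
# to the external data folder, as promised in the Step 5 heading. The category labels and
# file naming are assumptions for illustration, not code from the original notebook:
def scrape_sector_tables(driver, sector_url, sector_name, save_dir="../data/external/"):
    driver.get(sector_url)
    html = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
    tables = pd.read_html(html)
    ticks = {}
    for label, table in zip(["best_value", "fastest_growing", "most_momentum"], tables[:3]):
        df = table.iloc[1:]
        first_col = df.iloc[:, 0]
        # keep only the ticker symbol between parentheses, as in the cells above
        ticks[label] = [t[t.find("(") + 1:t.find(")")] for t in first_col]
        # note: the "most momentum" tables below carry two footer rows that the notebook
        # drops with `del ..._mm_ticks[-2:]`; a full implementation would do the same here
        df.to_csv(f"{save_dir}{sector_name}_{label}.csv", index=False)
    return ticks
# e.g. scrape_sector_tables(driver, top_energy[0], "energy")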
#32]:
driver.get(top_discretionary[0])
#33]:
dtable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
dtables = pd.read_html(dtable)
discretionary_bv = dtables[0]
discretionary_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
discretionary_bv
# Locate column containing ticker symbols:
discretionary_bv_df = discretionary_bv.iloc[1:]
# Only keep tick information within parentheses:
discretionary_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in discretionary_bv_df["tick"]]
discretionary_bv_ticks
#34]:
discretionary_fg = dtables[1]
discretionary_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
discretionary_fg_df = discretionary_fg.iloc[1:]
discretionary_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in discretionary_fg_df["stock"]]
discretionary_fg_ticks
#35]:
discretionary_mm = dtables[2]
discretionary_mm.columns = ["Discretionary Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
discretionary_mm_df = discretionary_mm.iloc[1:]
discretionary_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in discretionary_mm_df["Discretionary Most Momentum"]]
del discretionary_mm_ticks[-2:]
discretionary_mm_ticks
#36]:
driver.get(top_staples[0])
#37]:
stable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
stables = pd.read_html(stable)
staples_bv = stables[0]
staples_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
staples_bv
# Locate column containing ticker symbols:
staples_bv_df = staples_bv.iloc[1:]
# Only keep tick information within parentheses:
staples_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in staples_bv_df["tick"]]
staples_bv_ticks
#38]:
staples_fg = stables[1]
staples_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
staples_fg_df = staples_fg.iloc[1:]
staples_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in staples_fg_df["stock"]]
staples_fg_ticks
#39]:
staples_mm = stables[2]
staples_mm.columns = ["Staples Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
staples_mm_df = staples_mm.iloc[1:]
staples_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in staples_mm_df["Staples Most Momentum"]]
del staples_mm_ticks[-2:]
staples_mm_ticks
#40]:
driver.get(top_energy[0])
#41]:
etable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
etables = pd.read_html(etable)
energy_bv = etables[0]
energy_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
energy_bv
# Locate column containing ticker symbols:
energy_bv_df = energy_bv.iloc[1:]
# Only keep tick information within parentheses:
energy_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in energy_bv_df["tick"]]
energy_bv_ticks
#42]:
energy_fg = etables[1]
energy_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
energy_fg_df = energy_fg.iloc[1:]
energy_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in energy_fg_df["stock"]]
energy_fg_ticks
#43]:
energy_mm = etables[2]
energy_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
energy_mm_df = energy_mm.iloc[1:]
energy_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in energy_mm_df["Communications Most Momentum"]]
del energy_mm_ticks[-2:]
energy_mm_ticks
#44]:
driver.get(top_financial[0])
#45]:
ftable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
ftables = pd.read_html(ftable)
financial_bv = ftables[0]
financial_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
financial_bv
# Locate column containing ticker symbols:
financial_bv_df = financial_bv.iloc[1:]
# Only keep tick information within parentheses:
financial_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in financial_bv_df["tick"]]
financial_bv_ticks
#46]:
financial_fg = ftables[1]
financial_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
financial_fg_df = financial_fg.iloc[1:]
financial_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in financial_fg_df["stock"]]
financial_fg_ticks
#47]:
financial_mm = ftables[2]
financial_mm.columns = ["Financial Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
financial_mm_df = financial_mm.iloc[1:]
financial_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in financial_mm_df["Financial Most Momentum"]]
del financial_mm_ticks[-2:]
financial_mm_ticks
#48]:
driver.get(top_healthcare[0])
#49]:
htable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
htables = pd.read_html(htable)
healthcare_bv = htables[0]
healthcare_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
healthcare_bv
# Locate column containing ticker symbols:
healthcare_bv_df = healthcare_bv.iloc[1:]
# Only keep tick information within parentheses:
healthcare_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in healthcare_bv_df["tick"]]
healthcare_bv_ticks
#50]:
healthcare_fg = htables[1]
healthcare_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
healthcare_fg_df = healthcare_fg.iloc[1:]
healthcare_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in healthcare_fg_df["stock"]]
healthcare_fg_ticks
#51]:
healthcare_mm = htables[2]
healthcare_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
healthcare_mm_df = healthcare_mm.iloc[1:]
healthcare_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in healthcare_mm_df["Communications Most Momentum"]]
del healthcare_mm_ticks[-2:]
healthcare_mm_ticks
#52]:
driver.get(top_industrial[0])
#53]:
intable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
intables = pd.read_html(intable)
industrial_bv = intables[0]
industrial_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
industrial_bv
# Locate column containing ticker symbols:
industrial_bv_df = industrial_bv.iloc[1:]
# Only keep tick information within parentheses:
industrial_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in industrial_bv_df["tick"]]
industrial_bv_ticks
#54]:
industrial_fg = intables[1]
industrial_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
industrial_fg_df = industrial_fg.iloc[1:]
industrial_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in industrial_fg_df["stock"]]
industrial_fg_ticks
#55]:
industrial_mm = intables[2]
industrial_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
industrial_mm_df = industrial_mm.iloc[1:]
industrial_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in industrial_mm_df["Communications Most Momentum"]]
del industrial_mm_ticks[-2:]
industrial_mm_ticks
#56]:
driver.get(top_materials[0])
#57]:
motable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
motables = | pd.read_html(motable) | pandas.read_html |
from sstcam_sandbox import get_data, get_checs
from CHECLabPy.core.io import HDF5Writer
from TargetCalibSB.pedestal import PedestalTargetCalib
from TargetCalibSB.stats import OnlineStats, OnlineHist
from CHECLabPy.core.io import TIOReader
from tqdm import tqdm
import re
import pandas as pd
from glob import glob
def process(r0_paths, pedestal_path, output_path):
    """Subtract a TargetCalib pedestal from each R0 file and store the residual statistics in an HDF5 file."""
    data = []
for ipath, r0_path in enumerate(r0_paths):
print(f"Processing: {ipath+1}/{len(r0_paths)}")
regex_r0 = re.search(r".+_tc([\d.]+)_tmc([\d.]+).tio", r0_path)
temperature_r0_chamber = float(regex_r0.group(1))
temperature_r0_primary = float(regex_r0.group(2))
regex_ped = re.search(r".+_tc([\d.]+)_tmc([\d.]+)_ped.tcal", pedestal_path)
temperature_pedestal_chamber = float(regex_ped.group(1))
temperature_pedestal_primary = float(regex_ped.group(2))
reader = TIOReader(r0_path)
pedestal = PedestalTargetCalib(
reader.n_pixels, reader.n_samples, reader.n_cells
)
pedestal.load_tcal(pedestal_path)
online_stats = OnlineStats()
online_hist = OnlineHist(bins=100, range_=(-10, 10))
# Subtract Pedestals
desc = "Subtracting pedestal"
for wfs in tqdm(reader, total=reader.n_events, desc=desc):
if wfs.missing_packets:
continue
subtracted_tc = pedestal.subtract_pedestal(wfs, wfs.first_cell_id)[[0]]
online_stats.add_to_stats(subtracted_tc)
online_hist.add(subtracted_tc)
data.append(dict(
temperature_r0_chamber=temperature_r0_chamber,
temperature_r0_primary=temperature_r0_primary,
temperature_pedestal_chamber=temperature_pedestal_chamber,
temperature_pedestal_primary=temperature_pedestal_primary,
mean=online_stats.mean,
std=online_stats.std,
hist=online_hist.hist,
edges=online_hist.edges,
))
with HDF5Writer(output_path) as writer:
writer.write(data= | pd.DataFrame(data) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
        expected = 0.0 > pd.Series([0, 1, 2])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
"""
Created on Mon Feb 22 15:52:51 2021
@author: <NAME>
"""
import pandas as pd
import numpy as np
import os
import pickle
import calendar
import time
import warnings
from pyproj import Transformer
import networkx as nx
import matplotlib as mpl
import matplotlib.pyplot as plt
from requests import get
import dataframe_key
def compile_chicago_stations():
"""
Reads data files containing information about docking stations in Chicago
and compiles the data into a dataframe. The dataframe is then saved as a
pickle for further use.
The relevant files can be found at:
https://divvy-tripdata.s3.amazonaws.com/index.html
https://data.cityofchicago.org/Transportation/Divvy-Bicycle-Stations-All-Map/bk89-9dk7
Raises
------
FileNotFoundError
Raised if no data files containing station data are found.
Returns
-------
stat_df : pandas DataFrame
Dataframe of all docking station information.
"""
try:
with open('./python_variables/Chicago_stations.pickle', 'rb') as file:
stat_df = pickle.load(file)
except FileNotFoundError as exc:
print('No pickle found. Creating pickle...')
stat_files = [file for file in os.listdir('data') if 'Divvy_Stations' in file]
col_list = ['id', 'name', 'latitude', 'longitude']
key = {'ID':'id', 'Station Name':'name', 'Latitude':'latitude','Longitude':'longitude'}
try:
stat_df = pd.read_csv(
'data/Divvy_Bicycle_Stations_-_All_-_Map.csv').rename(columns = key)
stat_df = stat_df[col_list]
except FileNotFoundError:
stat_df = pd.DataFrame(columns = col_list)
for file in stat_files:
df = pd.read_csv(f'./data/{file}')[col_list]
stat_df = pd.concat([stat_df, df], sort = False)
if stat_df.size == 0:
raise FileNotFoundError(
'No data files containing station data found. Please read the docstring for more information.') from exc
stat_df.drop_duplicates(subset = 'name', inplace = True)
with open('./python_variables/Chicago_stations.pickle', 'wb') as file:
pickle.dump(stat_df, file)
print('Pickle loaded')
return stat_df
def get_JC_blacklist():
"""
Constructs/updates a blacklist of stations in Jersey City area. The
blacklist is created using historical biketrip datasets for the area.
Use only if you know what you are doing.
The relevant files can be found at:
https://www.citibikenyc.com/system-data
Raises
------
FileNotFoundError
Raised if no Jersey City dataset is found.
Returns
-------
blacklist : list
List of IDs of the Jersey City docking stations.
"""
try:
with open('./python_variables/JC_blacklist', 'rb') as file:
blacklist = pickle.load(file)
except FileNotFoundError:
print('No previous blacklist found. Creating blacklist...')
blacklist = set()
JC_files = [file for file in os.listdir('data') if 'JC' in file]
if len(JC_files) == 0:
raise FileNotFoundError(
'No JC files found. Please have a JC file in the data directory to create/update blacklist.')
for file in JC_files:
df = pd.read_csv('data/' + file)
df = df.rename(columns = dataframe_key.get_key('nyc'))
            # Jersey City stations lie west of roughly -74.02 degrees longitude
            # (Citi Bike longitudes are negative), so flag trips that start or end there.
            JC_start_mask = df['start_stat_long'] < -74.02
            JC_end_mask = df['end_stat_long'] < -74.02
            stat_IDs = set(
                df.loc[JC_start_mask, 'start_stat_id']) | set(df.loc[JC_end_mask, 'end_stat_id'])
blacklist = blacklist | stat_IDs
with open('./python_variables/JC_blacklist', 'wb') as file:
pickle.dump(blacklist, file)
print('Blacklist updated')
return blacklist
def days_index(df):
"""
Find indices of daily trips.
Parameters
----------
df : pandas DataFrame
Dataframe containing bikeshare trip data with columns that have been
renamed to the common key.
Returns
-------
d_i : dict
Contains the indices of the first trip per day.
"""
days = df['start_dt'].dt.day
d_i = [(days == i).idxmax() for i in range(1, max(days)+1)]
return dict(zip(range(1, max(days)+1), d_i))
def pickle_data(df, city, year, month):
"""
Generate pickle of days' starting indices.
Parameters
----------
df : pandas DataFrame
bikeshare trip data with columns that have been renamed to the common
key.
city : str
The identification of the city. For a list of supported cities, see
the documentation for the Data class.
year : int
The year of interest in YYYY format.
month : int
The month of interest in MM format.
Returns
-------
d : dict
Contains the indices of the first trip per day.
"""
d = days_index(df)
with open(f'./python_variables/day_index_{city}{year:d}{month:02d}.pickle', 'wb') as file:
pickle.dump(d, file)
return d
def get_data(city, year, month, blacklist=None):
"""
Read data from csv files.
Parameters
----------
city : str
The identification of the city. For a list of supported cities, see
the documentation for the Data class.
year : int
The year of interest in YYYY format.
month : int
The month of interest in MM format.
blacklist : list, optional
List of IDs of stations to remove. Default is None.
Returns
-------
df : pandas DataFrame
Dataframe containing bikeshare trip data.
days : dict
Contains the indices of the first trip per day.
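    Examples
    --------
    A minimal call (assumption: the matching trip-data csv files are present in
    the ./data directory):

    >>> df, days = get_data('nyc', 2019, 9)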
"""
supported_cities = ['nyc', 'sfran', 'sjose',
'washDC', 'chic', 'london',
'oslo', 'edinburgh', 'bergen',
'buenos_aires', 'madrid',
'mexico', 'taipei'] # Remember to update this list
if city not in supported_cities:
raise ValueError("This city is not currently supported. Supported cities are {}".format(supported_cities))
# Make folder for dataframes if not found
if not os.path.exists('python_variables/big_data'):
os.makedirs('python_variables/big_data')
try:
with open(f'./python_variables/big_data/{city}{year:d}{month:02d}_dataframe_blcklst={blacklist}.pickle', 'rb') as file:
df = pickle.load(file)
print('Pickle loaded')
except FileNotFoundError:
print('No dataframe pickle found. Pickling dataframe...')
if city == "nyc":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-citibike-tripdata.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.citibikenyc.com/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
try:
with open('./python_variables/JC_blacklist', 'rb') as file:
JC_blacklist = pickle.load(file)
df = df[~df['start_stat_id'].isin(JC_blacklist)]
df = df[~df['end_stat_id'].isin(JC_blacklist)]
except FileNotFoundError:
print('No JC blacklist found. Continuing...')
df.dropna(inplace=True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "washDC":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-capitalbikeshare-tripdata.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.capitalbikeshare.com/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df['start_stat_lat'] = ''
df['start_stat_long'] = ''
df['end_stat_lat'] = ''
df['end_stat_long'] = ''
stat_df = pd.read_csv('data/Capital_Bike_Share_Locations.csv')
for _ , stat in stat_df.iterrows():
start_matches = np.where(df['start_stat_id'] == stat['TERMINAL_NUMBER'])
end_matches = np.where(df['end_stat_id'] == stat['TERMINAL_NUMBER'])
df.at[start_matches[0], 'start_stat_lat'] = stat['LATITUDE']
df.at[start_matches[0], 'start_stat_long'] = stat['LONGITUDE']
df.at[end_matches[0], 'end_stat_lat'] = stat['LATITUDE']
df.at[end_matches[0], 'end_stat_long'] = stat['LONGITUDE']
df.replace('', np.nan, inplace = True)
df.dropna(inplace=True)
max_lat = 38.961029
min_lat = 38.792686
max_long= -76.909415
min_long= -77.139396
df = df.iloc[np.where(
(df['start_stat_lat'] < max_lat) &
(df['start_stat_lat'] > min_lat) &
(df['start_stat_long'] < max_long) &
(df['start_stat_long'] > min_long))]
df = df.iloc[np.where(
(df['end_stat_lat'] < max_lat) &
(df['end_stat_lat'] > min_lat) &
(df['end_stat_long'] < max_long) &
(df['end_stat_long'] > min_long))]
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "chic":
q = int(np.ceil(month/3))
try:
df = pd.read_csv(f'./data/Divvy_Trips_{year:d}_Q{q}.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.divvybikes.com/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
n_days = calendar.monthrange(year, month)[1]
df = df.iloc[np.where(df['start_t'] > f'{year:d}-{month:02d}-01 00:00:00')]
df = df.iloc[np.where(df['start_t'] < f'{year:d}-{month:02d}-{n_days} 23:59:59')]
df.reset_index(inplace = True, drop = True)
df['start_stat_lat'] = ''
df['start_stat_long'] = ''
df['end_stat_lat'] = ''
df['end_stat_long'] = ''
try:
with open('./python_variables/Chicago_stations.pickle', 'rb') as file:
stat_df = pickle.load(file)
except FileNotFoundError as exc:
compile_chicago_stations()
with open('./python_variables/Chicago_stations.pickle', 'rb') as file:
stat_df = pickle.load(file)
for _, stat in stat_df.iterrows():
start_matches = np.where(df['start_stat_name'] == stat['name'])
end_matches = np.where(df['end_stat_name'] == stat['name'])
df.at[start_matches[0], 'start_stat_lat'] = stat['latitude']
df.at[start_matches[0], 'start_stat_long'] = stat['longitude']
df.at[end_matches[0], 'end_stat_lat'] = stat['latitude']
df.at[end_matches[0], 'end_stat_long'] = stat['longitude']
df.replace('', np.nan, inplace = True)
df.dropna(subset = ['start_stat_lat',
'start_stat_long',
'end_stat_lat',
'end_stat_long'], inplace = True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
df['duration'] = df['duration'].str.replace(',', '').astype(float)
elif city == "sfran":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-baywheels-tripdata.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.lyft.com/bikes/bay-wheels/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df.dropna(inplace=True)
df = df.iloc[np.where(df['start_stat_lat'] > 37.593220)]
df = df.iloc[np.where(df['end_stat_lat'] > 37.593220)]
df.sort_values(by = 'start_t', inplace = True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "sjose":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-baywheels-tripdata.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.lyft.com/bikes/bay-wheels/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df.dropna(inplace=True)
df = df.iloc[np.where(df['start_stat_lat'] < 37.593220)]
df = df.iloc[np.where(df['end_stat_lat'] < 37.593220)]
df.sort_values(by = 'start_t', inplace = True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "london":
month_dict = {1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May',
6:'Jun', 7:'Jul', 8:'Aug', 9:'Sep', 10:'Oct',
11:'Nov', 12:'Dec'}
data_files = [file for file in os.listdir('data') if 'JourneyDataExtract' in file]
        data_files = [file for file in data_files if '{}'.format(year) in file
                      and '{}'.format(month_dict[month]) in file]
if len(data_files) == 0:
raise FileNotFoundError('No London data for {}. {} found. All relevant files can be found at https://cycling.data.tfl.gov.uk/.'.format(month_dict[month], year))
        if len(data_files) == 1:
            warnings.warn('Only one data file found. Please check that you have all available data.')
df = pd.read_csv('./data/' + data_files[0])
for file in data_files[1:]:
df_temp = pd.read_csv('./data/' + file)
df = pd.concat([df, df_temp], sort = False)
df.rename(columns = dataframe_key.get_key(city), inplace = True)
n_days = calendar.monthrange(year, month)[1]
df = df.iloc[np.where(df['start_t'] >= f'01/{month:02d}/{year} 00:00')]
df = df.iloc[np.where(df['start_t'] <= f'{n_days}/{month:02d}/{year} 23:59')]
df.sort_values(by = 'start_t', inplace = True)
df.reset_index(inplace = True)
df['start_t'] = pd.to_datetime(df['start_t'], format = '%d/%m/%Y %H:%M').astype(str)
df['end_t'] = pd.to_datetime(df['end_t'], format = '%d/%m/%Y %H:%M').astype(str)
stat_df = pd.read_csv('./data/london_stations.csv')
stat_df.at[np.where(stat_df['station_id'] == 502)[0][0], 'latitude'] = 51.53341
df['start_stat_lat'] = ''
df['start_stat_long'] = ''
df['end_stat_lat'] = ''
df['end_stat_long'] = ''
for _ , stat in stat_df.iterrows():
start_matches = np.where(df['start_stat_name'] == stat['station_name'])
end_matches = np.where(df['end_stat_name'] == stat['station_name'])
df.at[start_matches[0], 'start_stat_lat'] = stat['latitude']
df.at[start_matches[0], 'start_stat_long'] = stat['longitude']
df.at[end_matches[0], 'end_stat_lat'] = stat['latitude']
df.at[end_matches[0], 'end_stat_long'] = stat['longitude']
df.replace('', np.nan, inplace = True)
df.dropna(inplace = True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
df = df[df.start_dt.dt.month == month]
df.reset_index(inplace = True, drop = True)
elif city == "oslo":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-oslo.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://oslobysykkel.no/en/open-data/historical') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df.dropna(inplace=True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "edinburgh":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-edinburgh.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://edinburghcyclehire.com/open-data/historical') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df.dropna(inplace=True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "bergen":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-bergen.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://bergenbysykkel.no/en/open-data/historical') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df.dropna(inplace=True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "buenos_aires":
try:
df_year = | pd.read_csv(f"./data/recorridos-realizados-{year:d}.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""5-token_classification-่ฏ_็ฌฆๅท_token็บงๅซๅ็ฑปไปปๅก.ipynb
ๅจ่ฟ่กๅๅ
ๆ ผไนๅ๏ผๅปบ่ฎฎๆจๆ็
ง้กน็ฎreadmeไธญๆ็คบ๏ผๅปบ็ซไธไธชไธ้จ็python็ฏๅข็จไบๅญฆไน ๏ผ็ถๅๅฎ่ฃ
ไพ่ตๅบใ
"""
# ! pip install datasets transformers seqeval
"""ๅฆๆๆจๆญฃๅจๆฌๅฐๆๅผ่ฟไธชnotebook๏ผ่ฏท็กฎไฟๆจ่ฎค็้
่ฏปๅนถๅฎ่ฃ
ไบtransformer-quick-start-zh็readmeๆไปถไธญ็ๆๆไพ่ตๅบใๆจไนๅฏไปฅๅจ[่ฟ้](https://github.com/huggingface/transformers/tree/master/examples/token-classification)ๆพๅฐๆฌnotebook็ๅคGPUๅๅธๅผ่ฎญ็ป็ๆฌใ
# Fine-tuningๅพฎ่ฐtransformerๆจกๅ็จไบtoken็บง็ๅ็ฑปไปปๅก๏ผๆฏๅฆNERไปปๅก๏ผ
ๅจ่ฟไธชnotebookไธญ๏ผๆไปฌๅฐๅฑ็คบๅฆไฝไฝฟ็จ[๐ค Transformers](https://github.com/huggingface/transformers)ไธญ็ๆจกๅๅปๅtoken็บงๅซ็ๅ็ฑป้ฎ้ขใtoken็บงๅซ็ๅ็ฑปไปปๅก้ๅธธๆ็ๆฏไธบไธบๆๆฌไธญ็ๆฏไธไธชtoken้ขๆตไธไธชๆ ็ญพ็ปๆใไธๅพๅฑ็คบ็ๆฏไธไธชNERๅฎไฝๅ่ฏ่ฏๅซไปปๅกใ

ๆๅธธ่ง็token็บงๅซๅ็ฑปไปปๅก:
- NER (Named-entity recognition ๅ่ฏ-ๅฎไฝ่ฏๅซ) ๅ่พจๅบๆๆฌไธญ็ๅ่ฏๅๅฎไฝ (personไบบๅ, organization็ป็ปๆบๆๅ, locationๅฐ็นๅ...).
- POS (Part-of-speech tagging่ฏๆงๆ ๆณจ) ๆ นๆฎ่ฏญๆณๅฏนtoken่ฟ่ก่ฏๆงๆ ๆณจ (nounๅ่ฏ, verbๅจ่ฏ, adjectiveๅฝขๅฎน่ฏ...)
- Chunk (Chunking็ญ่ฏญ็ปๅ) ๅฐๅไธไธช็ญ่ฏญ็tokens็ปๅๆพๅจไธ่ตทใ
ๅฏนไบไปฅไธไปปๅก๏ผๆไปฌๅฐๅฑ็คบๅฆไฝไฝฟ็จ็ฎๅ็ๅ ่ฝฝๆฐๆฎ้๏ผๅๆถ้ๅฏน็ธๅบ็ไปๆ ไฝฟ็จtransformerไธญ็`Trainer`ๆฅๅฃๅฏนๆจกๅ่ฟ่กๅพฎ่ฐใ
ๅช่ฆ้ข่ฎญ็ป็transformerๆจกๅๆ้กถๅฑๆไธไธชtokenๅ็ฑป็็ฅ็ป็ฝ็ปๅฑ๏ผ็ฑไบtransformer็tokenizerๆฐ็นๆง๏ผ่ฟ้่ฆๅฏนๅบ็้ข่ฎญ็ปๆจกๅๆfast tokenizer๏ผๅ่[่ฟไธช่กจ](https://huggingface.co/transformers/index.html#bigtable)๏ผ๏ผ้ฃไนๆฌnotebook็่ฎบไธๅฏไปฅไฝฟ็จๅ็งๅๆ ท็transformerๆจกๅ๏ผ[ๆจกๅ้ขๆฟ](https://huggingface.co/models)๏ผ๏ผ่งฃๅณไปปไฝtoken็บงๅซ็ๅ็ฑปไปปๅกใ
ๅฆๆๆจๆๅค็็ไปปๅกๆๆไธๅ๏ผๅคงๆฆ็ๅช้่ฆๅพๅฐ็ๆนๅจไพฟๅฏไปฅไฝฟ็จๆฌnotebook่ฟ่กๅค็ใๅๆถ๏ผๆจๅบ่ฏฅๆ นๆฎๆจ็GPUๆพๅญๆฅ่ฐๆดๅพฎ่ฐ่ฎญ็ปๆ้่ฆ็btach sizeๅคงๅฐ๏ผ้ฟๅ
ๆพๅญๆบขๅบใ
"""
task = "ner" # ้่ฆๆฏ"ner", "pos" ๆ่
"chunk"
model_checkpoint = "distilbert-base-uncased"
batch_size = 16
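# Optional sanity check (a sketch, not part of the original notebook): this recipe relies on
# a "fast" (Rust-backed) tokenizer, which distilbert-base-uncased provides.
from transformers import AutoTokenizer
assert AutoTokenizer.from_pretrained(model_checkpoint).is_fast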
"""## ๅ ่ฝฝๆฐๆฎ
ๆไปฌๅฐไผไฝฟ็จ[๐ค datasets](https://github.com/huggingface/datasets)ๅบๆฅๅ ่ฝฝๆฐๆฎๅๅฏนๅบ็่ฏๆตๆนๅผใๆฐๆฎๅ ่ฝฝๅ่ฏๆตๆนๅผๅ ่ฝฝๅช้่ฆ็ฎๅไฝฟ็จ`load_dataset`ๅ`load_metric`ๅณๅฏใ
"""
from datasets import load_dataset, load_metric
"""ๆฌnotebookไธญ็ไพๅญไฝฟ็จ็ๆฏ[CONLL 2003 dataset](https://www.aclweb.org/anthology/W03-0419.pdf)ๆฐๆฎ้ใ่ฟไธชnotebookๅบ่ฏฅๅฏไปฅๅค็๐ค Datasetsๅบไธญ็ไปปไฝtokenๅ็ฑปไปปๅกใๅฆๆๆจไฝฟ็จ็ๆฏๆจ่ชๅฎไน็json/csvๆไปถๆฐๆฎ้๏ผๆจ้่ฆๆฅ็[ๆฐๆฎ้ๆๆกฃ](https://huggingface.co/docs/datasets/loading_datasets.html#from-local-files)ๆฅๅญฆไน ๅฆไฝๅ ่ฝฝใ่ชๅฎไนๆฐๆฎ้ๅฏ่ฝ้่ฆๅจๅ ่ฝฝๅฑๆงๅๅญไธๅไธไบ่ฐๆดใ"""
datasets = load_dataset("conll2003")
"""่ฟไธช`datasets`ๅฏน่ฑกๆฌ่บซๆฏไธ็ง[`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasetdict)ๆฐๆฎ็ปๆ. ๅฏนไบ่ฎญ็ป้ใ้ช่ฏ้ๅๆต่ฏ้๏ผๅช้่ฆไฝฟ็จๅฏนๅบ็key๏ผtrain๏ผvalidation๏ผtest๏ผๅณๅฏๅพๅฐ็ธๅบ็ๆฐๆฎใ"""
datasets
"""ๆ ่ฎบๆฏๅจ่ฎญ็ป้ใ้ช่ฏๆบ่ฟๆฏๆต่ฏ้ไธญ๏ผdatasets้ฝๅ
ๅซไบไธไธชๅไธบtokens็ๅ๏ผไธ่ฌๆฅ่ฏดๆฏๅฐๆๆฌๅๅๆไบๅพๅค่ฏ๏ผ๏ผ่ฟๅ
ๅซไธไธชๅไธบlabel็ๅ๏ผ่ฟไธๅๅฏนๅบ่ฟtokens็ๆ ๆณจใ
็ปๅฎไธไธชๆฐๆฎๅๅ็key๏ผtrainใvalidationๆ่
test๏ผๅไธๆ ๅณๅฏๆฅ็ๆฐๆฎใ
"""
datasets["train"][0]
"""ๆๆ็ๆฐๆฎๆ ็ญพlabels้ฝๅทฒ็ป่ขซ็ผ็ ๆไบๆดๆฐ๏ผๅฏไปฅ็ดๆฅ่ขซ้ข่ฎญ็ปtransformerๆจกๅไฝฟ็จใ่ฟไบๆดๆฐ็็ผ็ ๆๅฏนๅบ็ๅฎ้
็ฑปๅซๅจๅญๅจ`features`ไธญใ"""
datasets["train"].features[f"ner_tags"]
"""ๆไปฅไปฅNERไธบไพ๏ผ0ๅฏนๅบ็ๆ ็ญพ็ฑปๅซๆฏโOโ๏ผ 1ๅฏนๅบ็ๆฏโB-PERโ็ญ็ญใโOโ็ๆๆๆฏๆฒกๆ็นๅซๅฎไฝ๏ผno special entity๏ผใๆฌไพๅ
ๅซ4็งๅฎไฝ็ฑปๅซๅๅซๆฏ๏ผPERใORGใLOC๏ผMISC๏ผ๏ผๆฏไธ็งๅฎไฝ็ฑปๅซๅๅๅซๆB-๏ผๅฎไฝๅผๅง็token๏ผๅ็ผๅI-๏ผๅฎไฝไธญ้ด็token๏ผๅ็ผใ
- 'PER' for person
- 'ORG' for organization
- 'LOC' for location
- 'MISC' for miscellaneous
Since the labels are lists of `ClassLabel`, the actual names of the labels are nested in the `feature` attribute of the object above:
"""
label_list = datasets["train"].features[f"{task}_tags"].feature.names
label_list
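# A short illustration (not part of the original notebook) of how the integer tags map back
# to the label names above for one training sentence:
example = datasets["train"][0]
print(list(zip(example["tokens"], [label_list[tag] for tag in example[f"{task}_tags"]])))
# e.g. [('EU', 'B-ORG'), ('rejects', 'O'), ('German', 'B-MISC'), ...]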
"""ไธบไบ่ฝๅค่ฟไธๆญฅ็่งฃๆฐๆฎ้ฟไปไนๆ ทๅญ๏ผไธ้ข็ๅฝๆฐๅฐไปๆฐๆฎ้้้ๆบ้ๆฉๅ ไธชไพๅญ่ฟ่กๅฑ็คบใ"""
from datasets import ClassLabel, Sequence
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=10):
assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset) - 1)
while pick in picks:
pick = random.randint(0, len(dataset) - 1)
picks.append(pick)
df = | pd.DataFrame(dataset[picks]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
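# an unterminated quote in the last field should raise; appending the
# closing quote lets the quoted field (which then spans the remaining
# lines) parse, leaving only 3 rows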
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
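# f(i, v) builds row i with NA token v placed in column i and empty
# fields everywhere else, so every cell of the parsed frame should be NaN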
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
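# 'baz' is added to the default NA sentinels ('-1.#IND', 'NaN', 'NA', ...);
# skiprows=[1] drops the 'ignore,this,row' line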
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
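# default NA sentinels ('NA', 'NaN', 'nan') should be detected even in
# otherwise string-valued columns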
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
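# blank header fields should be auto-named 'Unnamed: <position>'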
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
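# empty fields in string columns should parse as NaN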
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO: add assertions on the parsed frame; for now this only checks that parsing succeeds
def test_csv_custom_parser(self):
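# data rows carry one more field than the header, so the leading date
# column becomes the index; a custom date_parser should give the same
# result as parse_dates=True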
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
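# the extra leading field makes the first column an implicit index,
# which parse_dates=True should convert to datetimes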
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
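# header=None should yield integer column labels, prefix='X' should
# yield X0..X4, and explicit names should be used verbatim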
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
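# rows have one more field than names, so the leading column becomes
# the index; also check that the names list is not mutated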
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few! the header declares fewer columns than the second data row,
# so parsing should raise
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
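# duplicate index labels should be preserved, matching
# set_index(..., verify_integrity=False)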
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
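# default TRUE/FALSE tokens and user-supplied true_values/false_values
# should both produce boolean columns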
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
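# rows have one more field than the header, so the extra leading
# column is inferred as the index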
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
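# header=2 should skip the two junk lines and match a read of the
# clean data with header=0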
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows drops the first four lines (the comments and the X,Y,Z row);
# header=1 then selects the second remaining line ('A,B,C') as the header
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
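# 'day_first' is not a valid keyword (the working parser above uses
# 'dayfirst'), so this read is expected to raise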
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
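# exercise every accepted form of index_col (None, False, int, str,
# list of int/str, and reversed lists) on a header-only CSV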
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
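# sep=None makes the python engine sniff '|' as the delimiter; the
# result should match an explicit delimiter='|'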
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
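# a regex separator ('\s+') is handled by the python engine and should
# match the comma-converted equivalent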
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("ืฉืืื\nืฉืืื".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["ืฉื", "ืื"]], columns=["ืฉื", "ืื"])
tm.assert_frame_equal(result, expected)
data = BytesIO("ืฉืืื::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["ืฉืืื", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = '''
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00
761 <NAME> 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r'''
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~<NAME>
33+++122.33\\\bar.........<NAME>
++44~~~~12.01 baz~~<NAME>
~~55 11+++foo++++<NAME>-Smith
..66++++++.03~~~bar <NAME>
'''.strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
import PyPDF2
import csv
from pathlib import Path
import io
import pandas
import numpy
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
# def Cpk(usl, lsl, avg, sigma , cf, sigma_cf):
# cpu = (usl - avg - (cf*sigma)) / (sigma_cf*sigma)
# cpl = (avg - lsl - (cf*sigma)) / (sigma_cf*sigma)
# cpk = numpy.min([cpu, cpl])
# return cpl,cpu,cpk
def convert_pdf_to_txt(path):
rsrcmgr = PDFResourceManager()
retstr = io.BytesIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos = set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages,
password=password,
caching=caching,
check_extractable=True):
interpreter.process_page(page)
text = retstr.getvalue()
fp.close()
device.close()
retstr.close()
return text
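# For illustration: convert_pdf_to_txt('report.pdf') (hypothetical path) would return the
# extracted text of every page as one blob; it comes back as bytes, since the converter
# writes through a BytesIO buffer with an explicit codec.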
def filename_extraction(inp_filename):
raw = inp_filename.split('_')
dev = raw[1]
volt = raw[2]
temp = raw[3]
condition = raw[4]+raw[5]+raw[6]+raw[7]
return dev,volt,temp,condition
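# For illustration, with a hypothetical filename "eye_CCG3PA2_5V0_25C_a_b_c_d.pdf" this
# returns dev="CCG3PA2", volt="5V0", temp="25C", condition="abcd.pdf"; at least eight
# underscore-separated parts are required, and the extension rides along on the last one.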
############################### User inputs ###############################################
path_of_files = r'C:\Users\vind\OneDrive - Cypress Semiconductor\documents\python_codes\EYE_DIAG_ANALYZER\pdf_ccg3pa2_tt'
pathlist = Path(path_of_files).glob('**/*.pdf')
output_filename = 'out'
automated_data_collection = 'yes' #'no'
################################# Program Begins #########################################
if automated_data_collection == 'no':
with open(output_filename +'raw'+ '.csv', 'a', newline='') as csvfile:
mywriter1 = csv.DictWriter(csvfile, dialect='excel',
fieldnames=['rise_time_average', 'rise_time_minimum', 'rise_time_maximum',
'fall_time_average', 'fall_time_minimum', 'fall_time_maximum',
'bit_rate_average', 'bit_rate_minimum', 'bit_rate_maximum',
'voltage_swing_average', 'voltage_swing_minimum', 'voltage_swing_maximum', 'filename'])
mywriter1.writeheader()
for files in pathlist:
###################### extracting only measurement page of the pdf file ##########################################
print(files.name)
pdfFileObj = open(files,'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
pdfWriter = PyPDF2.PdfFileWriter()
pdfReader.getNumPages()
pageNum = 3
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
pdfOutput = open('temp.pdf', 'wb')
pdfWriter.write(pdfOutput)
pdfOutput.close()
######################### pdf to text conversion ################################
x= convert_pdf_to_txt('temp.pdf')
text_extracted = x.split()
counter_list = list(enumerate(text_extracted, 1))
rise_time_average = (counter_list[91])[1]
fall_time_average = (counter_list[93])[1]
bit_rate_average = (counter_list[97])[1]
rise_time_minimum = (counter_list[145])[1]
fall_time_minimum = (counter_list[147])[1]
bit_rate_minimum = (counter_list[151])[1]
rise_time_maximum = (counter_list[156])[1]
fall_time_maximum = (counter_list[158])[1]
bit_rate_maximum = (counter_list[162])[1]
voltage_swing_average = (counter_list[131])[1]
voltage_swing_minimum = (counter_list[170])[1]
voltage_swing_maximum = (counter_list[174])[1]
data_raw = [float(rise_time_average), float(rise_time_minimum), float(rise_time_maximum), float(fall_time_average),
float(fall_time_minimum), float(fall_time_maximum), float(bit_rate_average), float(bit_rate_minimum),
float(bit_rate_maximum), float(voltage_swing_average), float(voltage_swing_minimum),
float(voltage_swing_maximum), files.name]
print(data_raw)
mywriter2 = csv.writer(csvfile, delimiter=',', dialect = 'excel')
mywriter2.writerow(data_raw)
################## Analysis begins ##########################################
pandas.set_option('display.expand_frame_repr', False)
data = pandas.DataFrame.from_csv(output_filename + 'raw' +'.csv',index_col=None)
data_grouped = data.agg([numpy.min, numpy.mean, numpy.max, numpy.std])
print(data_grouped)
writer = pandas.ExcelWriter(output_filename + '.xlsx')
data_grouped.to_excel(writer, 'Sheet1')
writer.save()
if automated_data_collection == 'yes':
with open(output_filename +'raw'+ '.csv', 'a', newline='') as csvfile:
mywriter1 = csv.DictWriter(csvfile, dialect='excel',
fieldnames=['rise_time_average', 'rise_time_minimum', 'rise_time_maximum',
'fall_time_average', 'fall_time_minimum', 'fall_time_maximum',
'bit_rate_average', 'bit_rate_minimum', 'bit_rate_maximum',
'voltage_swing_average', 'voltage_swing_minimum', 'voltage_swing_maximum', 'Device','Voltage','Temperature','Condition'])
mywriter1.writeheader()
for files in pathlist:
###################### extracting only measurement page of the pdf file ##########################################
print(files.name)
dev_no,v,t,cond = filename_extraction(files.name)
pdfFileObj = open(files,'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
pdfWriter = PyPDF2.PdfFileWriter()
pdfReader.getNumPages()
pageNum = 3
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
pdfOutput = open('temp.pdf', 'wb')
pdfWriter.write(pdfOutput)
pdfOutput.close()
######################### pdf to text conversion ################################
x= convert_pdf_to_txt('temp.pdf')
text_extracted = x.split()
counter_list = list(enumerate(text_extracted, 1))
rise_time_average = (counter_list[91])[1]
fall_time_average = (counter_list[93])[1]
bit_rate_average = (counter_list[97])[1]
rise_time_minimum = (counter_list[145])[1]
fall_time_minimum = (counter_list[147])[1]
bit_rate_minimum = (counter_list[151])[1]
rise_time_maximum = (counter_list[156])[1]
fall_time_maximum = (counter_list[158])[1]
bit_rate_maximum = (counter_list[162])[1]
voltage_swing_average = (counter_list[131])[1]
voltage_swing_minimum = (counter_list[170])[1]
voltage_swing_maximum = (counter_list[174])[1]
data_raw = [float(rise_time_average), float(rise_time_minimum), float(rise_time_maximum), float(fall_time_average),
float(fall_time_minimum), float(fall_time_maximum), float(bit_rate_average), float(bit_rate_minimum),
float(bit_rate_maximum), float(voltage_swing_average), float(voltage_swing_minimum),
float(voltage_swing_maximum), dev_no, v,t,cond]
print(data_raw)
mywriter2 = csv.writer(csvfile, delimiter=',', dialect = 'excel')
mywriter2.writerow(data_raw)
################## Analysis begins ##########################################
pandas.set_option('display.expand_frame_repr', False)
data = pandas.DataFrame.from_csv(output_filename + 'raw' +'.csv',index_col=None)
data1 = data.groupby(['Voltage','Temperature','Condition'])
data_grouped = data1.agg([numpy.min, numpy.mean, numpy.max, numpy.std])
print(data_grouped)
writer = pandas.ExcelWriter(output_filename + '.xlsx')
data_grouped.to_excel(writer, 'Sheet1')
writer.save()
"""
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import annotations
import operator
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Literal,
Union,
cast,
final,
)
from warnings import warn
import numpy as np
from pandas._libs import (
algos,
hashtable as htable,
iNaT,
lib,
)
from pandas._typing import (
AnyArrayLike,
ArrayLike,
DtypeObj,
Scalar,
TakeIndexer,
npt,
)
from pandas.util._decorators import doc
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike,
infer_dtype_from_array,
sanitize_to_nanoseconds,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_object,
ensure_platform_int,
is_array_like,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import PandasDtype
from pandas.core.dtypes.generic import (
ABCDatetimeArray,
ABCExtensionArray,
ABCIndex,
ABCMultiIndex,
ABCRangeIndex,
ABCSeries,
ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import (
isna,
na_value_for_dtype,
)
from pandas.core.array_algos.take import take_nd
from pandas.core.construction import (
array as pd_array,
ensure_wrapped_if_datetimelike,
extract_array,
)
from pandas.core.indexers import validate_indices
if TYPE_CHECKING:
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
)
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
TimedeltaArray,
)
_shared_docs: dict[str, str] = {}
# --------------- #
# dtype access #
# --------------- #
def _ensure_data(values: ArrayLike) -> np.ndarray:
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
This will coerce:
- ints -> int64
- uint -> uint64
- bool -> uint64 (TODO this should be uint8)
- datetimelike -> i8
- datetime64tz -> i8 (in local tz)
- categorical -> codes
Parameters
----------
values : np.ndarray or ExtensionArray
Returns
-------
np.ndarray
"""
if not isinstance(values, ABCMultiIndex):
# extract_array would raise
values = extract_array(values, extract_numpy=True)
# we check some simple dtypes first
if is_object_dtype(values.dtype):
return ensure_object(np.asarray(values))
elif is_bool_dtype(values.dtype):
if isinstance(values, np.ndarray):
# i.e. actually dtype == np.dtype("bool")
return np.asarray(values).view("uint8")
else:
# i.e. all-bool Categorical, BooleanArray
try:
return np.asarray(values).astype("uint8", copy=False)
except TypeError:
# GH#42107 we have pd.NAs present
return np.asarray(values)
elif is_integer_dtype(values.dtype):
return np.asarray(values)
elif is_float_dtype(values.dtype):
# Note: checking `values.dtype == "float128"` raises on Windows and 32bit
# error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype, dtype[Any]]"
# has no attribute "itemsize"
if values.dtype.itemsize in [2, 12, 16]: # type: ignore[union-attr]
# we don't (yet) have float128 hashtable support
return ensure_float64(values)
return np.asarray(values)
elif is_complex_dtype(values.dtype):
# Incompatible return value type (got "Tuple[Union[Any, ExtensionArray,
# ndarray[Any, Any]], Union[Any, ExtensionDtype]]", expected
# "Tuple[ndarray[Any, Any], Union[dtype[Any], ExtensionDtype]]")
return values # type: ignore[return-value]
# datetimelike
elif needs_i8_conversion(values.dtype):
if isinstance(values, np.ndarray):
values = sanitize_to_nanoseconds(values)
npvalues = values.view("i8")
npvalues = cast(np.ndarray, npvalues)
return npvalues
elif is_categorical_dtype(values.dtype):
values = cast("Categorical", values)
values = values.codes
return values
# we have failed, return object
values = np.asarray(values, dtype=object)
return ensure_object(values)
def _reconstruct_data(
values: ArrayLike, dtype: DtypeObj, original: AnyArrayLike
) -> ArrayLike:
"""
reverse of _ensure_data
Parameters
----------
values : np.ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
original : AnyArrayLike
Returns
-------
ExtensionArray or np.ndarray
"""
if isinstance(values, ABCExtensionArray) and values.dtype == dtype:
# Catch DatetimeArray/TimedeltaArray
return values
if not isinstance(dtype, np.dtype):
# i.e. ExtensionDtype
cls = dtype.construct_array_type()
if isinstance(values, cls) and values.dtype == dtype:
return values
values = cls._from_sequence(values)
elif is_bool_dtype(dtype):
values = values.astype(dtype, copy=False)
# we only support object dtypes bool Index
if isinstance(original, ABCIndex):
values = values.astype(object, copy=False)
elif dtype is not None:
if is_datetime64_dtype(dtype):
dtype = np.dtype("datetime64[ns]")
elif is_timedelta64_dtype(dtype):
dtype = np.dtype("timedelta64[ns]")
values = values.astype(dtype, copy=False)
return values
def _ensure_arraylike(values) -> ArrayLike:
"""
ensure that we are arraylike if not already
"""
if not is_array_like(values):
inferred = lib.infer_dtype(values, skipna=False)
if inferred in ["mixed", "string", "mixed-integer"]:
# "mixed-integer" to ensure we do not cast ["ss", 42] to str GH#22160
if isinstance(values, tuple):
values = list(values)
values = construct_1d_object_array_from_listlike(values)
else:
values = np.asarray(values)
return values
_hashtables = {
"complex128": htable.Complex128HashTable,
"complex64": htable.Complex64HashTable,
"float64": htable.Float64HashTable,
"float32": htable.Float32HashTable,
"uint64": htable.UInt64HashTable,
"uint32": htable.UInt32HashTable,
"uint16": htable.UInt16HashTable,
"uint8": htable.UInt8HashTable,
"int64": htable.Int64HashTable,
"int32": htable.Int32HashTable,
"int16": htable.Int16HashTable,
"int8": htable.Int8HashTable,
"string": htable.StringHashTable,
"object": htable.PyObjectHashTable,
}
def _get_hashtable_algo(values: np.ndarray):
"""
Parameters
----------
values : np.ndarray
Returns
-------
htable : HashTable subclass
values : ndarray
"""
values = _ensure_data(values)
ndtype = _check_object_for_strings(values)
htable = _hashtables[ndtype]
return htable, values
def _get_values_for_rank(values: ArrayLike) -> np.ndarray:
if is_categorical_dtype(values):
values = cast("Categorical", values)._values_for_rank()
values = _ensure_data(values)
if values.dtype.kind in ["i", "u", "f"]:
# rank_t includes only object, int64, uint64, float64
dtype = values.dtype.kind + "8"
values = values.astype(dtype, copy=False)
return values
def get_data_algo(values: ArrayLike):
values = _get_values_for_rank(values)
ndtype = _check_object_for_strings(values)
htable = _hashtables.get(ndtype, _hashtables["object"])
return htable, values
def _check_object_for_strings(values: np.ndarray) -> str:
"""
Check if we can use string hashtable instead of object hashtable.
Parameters
----------
values : ndarray
Returns
-------
str
"""
ndtype = values.dtype.name
if ndtype == "object":
# it's cheaper to use a String Hash Table than Object; we infer
# including nulls because that is the only difference between
# StringHashTable and ObjectHashtable
if lib.infer_dtype(values, skipna=False) in ["string"]:
ndtype = "string"
return ndtype
# --------------- #
# top-level algos #
# --------------- #
def unique(values):
"""
Hash table-based unique. Uniques are returned in order
of appearance. This does NOT sort.
Significantly faster than numpy.unique for long enough sequences.
Includes NA values.
Parameters
----------
values : 1d array-like
Returns
-------
numpy.ndarray or ExtensionArray
The return can be:
* Index : when the input is an Index
* Categorical : when the input is a Categorical dtype
* ndarray : when the input is a Series/ndarray
Return numpy.ndarray or ExtensionArray.
See Also
--------
Index.unique : Return unique values from an Index.
Series.unique : Return unique values of Series object.
Examples
--------
>>> pd.unique(pd.Series([2, 1, 3, 3]))
array([2, 1, 3])
>>> pd.unique(pd.Series([2] + [1] * 5))
array([2, 1])
>>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")]))
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.unique(
... pd.Series(
... [
... pd.Timestamp("20160101", tz="US/Eastern"),
... pd.Timestamp("20160101", tz="US/Eastern"),
... ]
... )
... )
<DatetimeArray>
['2016-01-01 00:00:00-05:00']
Length: 1, dtype: datetime64[ns, US/Eastern]
>>> pd.unique(
... pd.Index(
... [
... pd.Timestamp("20160101", tz="US/Eastern"),
... pd.Timestamp("20160101", tz="US/Eastern"),
... ]
... )
... )
DatetimeIndex(['2016-01-01 00:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
>>> pd.unique(list("baabc"))
array(['b', 'a', 'c'], dtype=object)
An unordered Categorical will return categories in the
order of appearance.
>>> pd.unique(pd.Series(pd.Categorical(list("baabc"))))
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
>>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc"))))
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
An ordered Categorical preserves the category ordering.
>>> pd.unique(
... pd.Series(
... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
... )
... )
['b', 'a', 'c']
Categories (3, object): ['a' < 'b' < 'c']
An array of tuples
>>> pd.unique([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")])
array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
"""
values = _ensure_arraylike(values)
if is_extension_array_dtype(values.dtype):
# Dispatch to extension dtype's unique.
return values.unique()
original = values
htable, values = _get_hashtable_algo(values)
table = htable(len(values))
uniques = table.unique(values)
uniques = _reconstruct_data(uniques, original.dtype, original)
return uniques
unique1d = unique
def isin(comps: AnyArrayLike, values: AnyArrayLike) -> npt.NDArray[np.bool_]:
"""
Compute the isin boolean array.
Parameters
----------
comps : array-like
values : array-like
Returns
-------
ndarray[bool]
Same length as `comps`.
"""
if not is_list_like(comps):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{type(comps).__name__}]"
)
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{type(values).__name__}]"
)
if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
values = _ensure_arraylike(list(values))
elif isinstance(values, ABCMultiIndex):
# Avoid raising in extract_array
values = np.array(values)
else:
values = extract_array(values, extract_numpy=True, extract_range=True)
comps = _ensure_arraylike(comps)
comps = extract_array(comps, extract_numpy=True)
if not isinstance(comps, np.ndarray):
# i.e. Extension Array
return comps.isin(values)
elif needs_i8_conversion(comps.dtype):
# Dispatch to DatetimeLikeArrayMixin.isin
return pd_array(comps).isin(values)
elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps.dtype):
# e.g. comps are integers and values are datetime64s
return np.zeros(comps.shape, dtype=bool)
# TODO: not quite right ... Sparse/Categorical
elif needs_i8_conversion(values.dtype):
return isin(comps, values.astype(object))
elif is_extension_array_dtype(values.dtype):
return isin(np.asarray(comps), np.asarray(values))
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
# Albeit hashmap has O(1) look-up (vs. O(logn) in sorted array),
# in1d is faster for small sizes
if len(comps) > 1_000_000 and len(values) <= 26 and not is_object_dtype(comps):
# If the values include nan we need to check for nan explicitly
# since np.nan it not equal to np.nan
if isna(values).any():
def f(c, v):
return np.logical_or(np.in1d(c, v), np.isnan(c))
else:
f = np.in1d
else:
# error: List item 0 has incompatible type "Union[Any, dtype[Any],
# ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
# Tuple[Any, Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any,
# Any]]"
# error: List item 1 has incompatible type "Union[Any, ExtensionDtype]";
# expected "Union[dtype[Any], None, type, _SupportsDType, str, Tuple[Any,
# Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]"
# error: List item 1 has incompatible type "Union[dtype[Any], ExtensionDtype]";
# expected "Union[dtype[Any], None, type, _SupportsDType, str, Tuple[Any,
# Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]"
common = np.find_common_type(
[values.dtype, comps.dtype], [] # type: ignore[list-item]
)
values = values.astype(common, copy=False)
comps = comps.astype(common, copy=False)
f = htable.ismember
return f(comps, values)
def factorize_array(
values: np.ndarray,
na_sentinel: int = -1,
size_hint: int | None = None,
na_value=None,
mask: np.ndarray | None = None,
) -> tuple[npt.NDArray[np.intp], np.ndarray]:
"""
Factorize a numpy array to codes and uniques.
This doesn't do any coercion of types or unboxing before factorization.
Parameters
----------
values : ndarray
na_sentinel : int, default -1
size_hint : int, optional
Passed through to the hashtable's 'get_labels' method
na_value : object, optional
A value in `values` to consider missing. Note: only use this
parameter when you know that you don't have any values pandas would
consider missing in the array (NaN for float data, iNaT for
datetimes, etc.).
mask : ndarray[bool], optional
If not None, the mask is used as indicator for missing values
(True = missing, False = valid) instead of `na_value` or
condition "val != val".
Returns
-------
codes : ndarray[np.intp]
uniques : ndarray
"""
hash_klass, values = get_data_algo(values)
table = hash_klass(size_hint or len(values))
uniques, codes = table.factorize(
values, na_sentinel=na_sentinel, na_value=na_value, mask=mask
)
codes = ensure_platform_int(codes)
return codes, uniques
@doc(
values=dedent(
"""\
values : sequence
A 1-D sequence. Sequences that aren't pandas objects are
coerced to ndarrays before factorization.
"""
),
sort=dedent(
"""\
sort : bool, default False
Sort `uniques` and shuffle `codes` to maintain the
relationship.
"""
),
size_hint=dedent(
"""\
size_hint : int, optional
Hint to the hashtable sizer.
"""
),
)
def factorize(
values,
sort: bool = False,
na_sentinel: int | None = -1,
size_hint: int | None = None,
) -> tuple[np.ndarray, np.ndarray | Index]:
"""
Encode the object as an enumerated type or categorical variable.
This method is useful for obtaining a numeric representation of an
array when all that matters is identifying distinct values. `factorize`
is available as both a top-level function :func:`pandas.factorize`,
and as a method :meth:`Series.factorize` and :meth:`Index.factorize`.
Parameters
----------
{values}{sort}
na_sentinel : int or None, default -1
Value to mark "not found". If None, will not drop the NaN
from the uniques of the values.
.. versionchanged:: 1.1.2
{size_hint}\
Returns
-------
codes : ndarray
An integer ndarray that's an indexer into `uniques`.
``uniques.take(codes)`` will have the same values as `values`.
uniques : ndarray, Index, or Categorical
The unique valid values. When `values` is Categorical, `uniques`
is a Categorical. When `values` is some other pandas object, an
`Index` is returned. Otherwise, a 1-D ndarray is returned.
.. note ::
Even if there's a missing value in `values`, `uniques` will
*not* contain an entry for it.
See Also
--------
cut : Discretize continuous-valued array.
unique : Find the unique value in an array.
Examples
--------
These examples all show factorize as a top-level method like
``pd.factorize(values)``. The results are identical for methods like
:meth:`Series.factorize`.
>>> codes, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'])
>>> codes
array([0, 0, 1, 2, 0]...)
>>> uniques
array(['b', 'a', 'c'], dtype=object)
With ``sort=True``, the `uniques` will be sorted, and `codes` will be
shuffled so that the relationship is maintained.
>>> codes, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'], sort=True)
>>> codes
array([1, 1, 0, 2, 1]...)
>>> uniques
array(['a', 'b', 'c'], dtype=object)
Missing values are indicated in `codes` with `na_sentinel`
(``-1`` by default). Note that missing values are never
included in `uniques`.
>>> codes, uniques = pd.factorize(['b', None, 'a', 'c', 'b'])
>>> codes
array([ 0, -1, 1, 2, 0]...)
>>> uniques
array(['b', 'a', 'c'], dtype=object)
Thus far, we've only factorized lists (which are internally coerced to
NumPy arrays). When factorizing pandas objects, the type of `uniques`
will differ. For Categoricals, a `Categorical` is returned.
>>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
>>> codes, uniques = pd.factorize(cat)
>>> codes
array([0, 0, 1]...)
>>> uniques
['a', 'c']
Categories (3, object): ['a', 'b', 'c']
Notice that ``'b'`` is in ``uniques.categories``, despite not being
present in ``cat.values``.
For all other pandas objects, an Index of the appropriate type is
returned.
>>> cat = pd.Series(['a', 'a', 'c'])
>>> codes, uniques = pd.factorize(cat)
>>> codes
array([0, 0, 1]...)
>>> uniques
Index(['a', 'c'], dtype='object')
If NaN is in the values, and we want to include NaN in the uniques of the
values, it can be achieved by setting ``na_sentinel=None``.
>>> values = np.array([1, 2, 1, np.nan])
>>> codes, uniques = pd.factorize(values) # default: na_sentinel=-1
>>> codes
array([ 0, 1, 0, -1])
>>> uniques
array([1., 2.])
>>> codes, uniques = pd.factorize(values, na_sentinel=None)
>>> codes
array([0, 1, 0, 2])
>>> uniques
array([ 1., 2., nan])
"""
# Implementation notes: This method is responsible for 3 things
# 1.) coercing data to array-like (ndarray, Index, extension array)
# 2.) factorizing codes and uniques
# 3.) Maybe boxing the uniques in an Index
#
# Step 2 is dispatched to extension types (like Categorical). They are
# responsible only for factorization. All data coercion, sorting and boxing
# should happen here.
if isinstance(values, ABCRangeIndex):
return values.factorize(sort=sort)
values = _ensure_arraylike(values)
original = values
if not isinstance(values, ABCMultiIndex):
values = extract_array(values, extract_numpy=True)
# GH35667, if na_sentinel=None, we will not dropna NaNs from the uniques
# of values, assign na_sentinel=-1 to replace code value for NaN.
dropna = True
if na_sentinel is None:
na_sentinel = -1
dropna = False
if (
isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray))
and values.freq is not None
):
codes, uniques = values.factorize(sort=sort)
if isinstance(original, ABCIndex):
uniques = original._shallow_copy(uniques, name=None)
elif isinstance(original, ABCSeries):
from pandas import Index
uniques = Index(uniques)
return codes, uniques
if not isinstance(values.dtype, np.dtype):
# i.e. ExtensionDtype
codes, uniques = values.factorize(na_sentinel=na_sentinel)
dtype = original.dtype
else:
dtype = values.dtype
values = _ensure_data(values)
na_value: Scalar
if original.dtype.kind in ["m", "M"]:
# Note: factorize_array will cast NaT bc it has a __int__
# method, but will not cast the more-correct dtype.type("nat")
na_value = iNaT
else:
na_value = None
codes, uniques = factorize_array(
values, na_sentinel=na_sentinel, size_hint=size_hint, na_value=na_value
)
if sort and len(uniques) > 0:
uniques, codes = safe_sort(
uniques, codes, na_sentinel=na_sentinel, assume_unique=True, verify=False
)
code_is_na = codes == na_sentinel
if not dropna and code_is_na.any():
# na_value is set based on the dtype of uniques, and compat set to False is
# because we do not want na_value to be 0 for integers
na_value = na_value_for_dtype(uniques.dtype, compat=False)
uniques = np.append(uniques, [na_value])
codes = np.where(code_is_na, len(uniques) - 1, codes)
uniques = _reconstruct_data(uniques, dtype, original)
# box uniques back into the container type of the original input (Index for Index/Series input)
if isinstance(original, ABCIndex):
if original.dtype.kind in ["m", "M"] and isinstance(uniques, np.ndarray):
original._data = cast(
"Union[DatetimeArray, TimedeltaArray]", original._data
)
uniques = type(original._data)._simple_new(uniques, dtype=original.dtype)
uniques = original._shallow_copy(uniques, name=None)
elif isinstance(original, ABCSeries):
from pandas import Index
uniques = Index(uniques)
return codes, uniques
def value_counts(
values,
sort: bool = True,
ascending: bool = False,
normalize: bool = False,
bins=None,
dropna: bool = True,
) -> Series:
"""
Compute a histogram of the counts of non-null values.
Parameters
----------
values : ndarray (1-d)
sort : bool, default True
Sort by values
ascending : bool, default False
Sort in ascending order
normalize: bool, default False
If True then compute a relative histogram
bins : integer, optional
Rather than count values, group them into half-open bins,
convenience for pd.cut, only works with numeric data
dropna : bool, default True
Don't include counts of NaN
Returns
-------
Series
"""
from pandas.core.series import Series
name = getattr(values, "name", None)
if bins is not None:
from pandas.core.reshape.tile import cut
values = Series(values)
try:
ii = cut(values, bins, include_lowest=True)
except TypeError as err:
raise TypeError("bins argument only works with numeric data.") from err
# count, remove nulls (from the index), and use the interval bins as the index
result = ii.value_counts(dropna=dropna)
result = result[result.index.notna()]
result.index = result.index.astype("interval")
result = result.sort_index()
# if we are dropna and we have NO values
if dropna and (result._values == 0).all():
result = result.iloc[0:0]
# normalizing is by len of all (regardless of dropna)
counts = np.array([len(ii)])
else:
if is_extension_array_dtype(values):
# handle Categorical and sparse,
result = Series(values)._values.value_counts(dropna=dropna)
result.name = name
counts = result._values
else:
keys, counts = value_counts_arraylike(values, dropna)
result = Series(counts, index=keys, name=name)
if sort:
result = result.sort_values(ascending=ascending)
if normalize:
result = result / counts.sum()
return result
# Called once from SparseArray, otherwise could be private
def value_counts_arraylike(values, dropna: bool):
"""
Parameters
----------
values : arraylike
dropna : bool
Returns
-------
uniques : np.ndarray or ExtensionArray
counts : np.ndarray
"""
values = _ensure_arraylike(values)
original = values
values = _ensure_data(values)
# TODO: handle uint8
keys, counts = htable.value_count(values, dropna)
if needs_i8_conversion(original.dtype):
# datetime, timedelta, or period
if dropna:
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
res_keys = _reconstruct_data(keys, original.dtype, original)
return res_keys, counts
def duplicated(
values: ArrayLike, keep: Literal["first", "last", False] = "first"
) -> npt.NDArray[np.bool_]:
"""
Return boolean ndarray denoting duplicate values.
Parameters
----------
values : nd.array, ExtensionArray or Series
Array over which to check for duplicate values.
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : ndarray[bool]
"""
values = _ensure_data(values)
return htable.duplicated(values, keep=keep)
def mode(values, dropna: bool = True) -> Series:
"""
Returns the mode(s) of an array.
Parameters
----------
values : array-like
Array over which to check for duplicate values.
dropna : bool, default True
Don't consider counts of NaN/NaT.
Returns
-------
mode : Series
"""
from pandas import Series
from pandas.core.indexes.api import default_index
values = _ensure_arraylike(values)
original = values
# categorical is a fast-path
if is_categorical_dtype(values):
if isinstance(values, Series):
# TODO: should we be passing `name` below?
return Series(values._values.mode(dropna=dropna), name=values.name)
return values.mode(dropna=dropna)
if dropna and needs_i8_conversion(values.dtype):
mask = values.isnull()
values = values[~mask]
values = _ensure_data(values)
npresult = htable.mode(values, dropna=dropna)
try:
npresult = np.sort(npresult)
except TypeError as err:
warn(f"Unable to sort modes: {err}")
result = _reconstruct_data(npresult, original.dtype, original)
# Ensure index is type stable (should always use int index)
return Series(result, index=default_index(len(result)))
def rank(
values: ArrayLike,
axis: int = 0,
method: str = "average",
na_option: str = "keep",
ascending: bool = True,
pct: bool = False,
) -> np.ndarray:
"""
Rank the values along a given axis.
Parameters
----------
values : array-like
Array whose values will be ranked. The number of dimensions in this
array must not exceed 2.
axis : int, default 0
Axis over which to perform rankings.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
The method by which tiebreaks are broken during the ranking.
na_option : {'keep', 'top'}, default 'keep'
The method by which NaNs are placed in the ranking.
- ``keep``: rank each NaN value with a NaN ranking
- ``top``: replace each NaN with +/- inf so that they
are ranked at the top
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in integer form
(e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
"""
is_datetimelike = needs_i8_conversion(values.dtype)
values = _get_values_for_rank(values)
if values.ndim == 1:
ranks = algos.rank_1d(
values,
is_datetimelike=is_datetimelike,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
elif values.ndim == 2:
ranks = algos.rank_2d(
values,
axis=axis,
is_datetimelike=is_datetimelike,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
else:
raise TypeError("Array with ndim > 2 are not supported.")
return ranks
def checked_add_with_arr(
arr: np.ndarray,
b,
arr_mask: npt.NDArray[np.bool_] | None = None,
b_mask: npt.NDArray[np.bool_] | None = None,
) -> np.ndarray:
"""
Perform array addition that checks for underflow and overflow.
Performs the addition of an int64 array and an int64 integer (or array)
but checks that they do not result in overflow first. For elements that
are indicated to be NaN, whether or not there is overflow for that element
is automatically ignored.
Parameters
----------
arr : array addend.
b : array or scalar addend.
arr_mask : np.ndarray[bool] or None, default None
array indicating which elements to exclude from checking
b_mask : np.ndarray[bool] or None, default None
array or scalar indicating which element(s) to exclude from checking
Returns
-------
sum : An array for elements x + b for each element x in arr if b is
a scalar or an array for elements x + y for each element pair
(x, y) in (arr, b).
Raises
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
"""
# For performance reasons, we broadcast 'b' to the new array 'b2'
# so that it has the same size as 'arr'.
b2 = np.broadcast_to(b, arr.shape)
if b_mask is not None:
# We do the same broadcasting for b_mask as well.
b2_mask = np.broadcast_to(b_mask, arr.shape)
else:
b2_mask = None
# For elements that are NaN, regardless of their value, we should
# ignore whether they overflow or not when doing the checked add.
if arr_mask is not None and b2_mask is not None:
not_nan = np.logical_not(arr_mask | b2_mask)
elif arr_mask is not None:
not_nan = np.logical_not(arr_mask)
elif b_mask is not None:
not_nan = np.logical_not(b2_mask)
else:
not_nan = np.empty(arr.shape, dtype=bool)
not_nan.fill(True)
# gh-14324: For each element in 'arr' and its corresponding element
# in 'b2', we check the sign of the element in 'b2'. If it is positive,
# we then check whether its sum with the element in 'arr' exceeds
# np.iinfo(np.int64).max. If so, we have an overflow error. If it
# it is negative, we then check whether its sum with the element in
# 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
# error as well.
i8max = lib.i8max
i8min = iNaT
mask1 = b2 > 0
mask2 = b2 < 0
if not mask1.any():
to_raise = ((i8min - b2 > arr) & not_nan).any()
elif not mask2.any():
to_raise = ((i8max - b2 < arr) & not_nan).any()
else:
to_raise = ((i8max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or (
(i8min - b2[mask2] > arr[mask2]) & not_nan[mask2]
).any()
if to_raise:
raise OverflowError("Overflow in int64 addition")
return arr + b
def quantile(x, q, interpolation_method="fraction"):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
- lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = isna(x)
x = x[~mask]
values = np.sort(x)
def _interpolate(a, b, fraction):
"""
Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[int(idx)]
else:
if interpolation_method == "fraction":
score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1)
elif interpolation_method == "lower":
score = values[np.floor(idx)]
elif interpolation_method == "higher":
score = values[np.ceil(idx)]
else:
raise ValueError(
"interpolation_method can only be 'fraction' "
", 'lower' or 'higher'"
)
return score
if is_scalar(q):
return _get_score(q)
q = np.asarray(q, np.float64)
result = [_get_score(x) for x in q]
return np.array(result, dtype=np.float64)
# --------------- #
# select n #
# --------------- #
class SelectN:
def __init__(self, obj, n: int, keep: str):
self.obj = obj
self.n = n
self.keep = keep
if self.keep not in ("first", "last", "all"):
raise ValueError('keep must be either "first", "last" or "all"')
def compute(self, method: str) -> DataFrame | Series:
raise NotImplementedError
@final
def nlargest(self):
return self.compute("nlargest")
@final
def nsmallest(self):
return self.compute("nsmallest")
@final
@staticmethod
def is_valid_dtype_n_method(dtype: DtypeObj) -> bool:
"""
Helper function to determine if dtype is valid for
nsmallest/nlargest methods
"""
return (
is_numeric_dtype(dtype) and not is_complex_dtype(dtype)
) or needs_i8_conversion(dtype)
"""Next Gen Loaders contains a class of functions that loads a next gen data set, returning the data in a
consistent, well structured format.
Data is read as it is and is not cleaned in any way in this class (if cleaning is required, see
preparation.cleaners).
"""
import os
import pandas
import numpy
import pickle
class NextGenData:
"""This classes is responsible for loading and managing next gen data sets provided by Reposit
The data provided can be read and converted measurement data. Data is
read and stored as an intermediate step in a folder "Data", each measurement type has its own
sub folder (loads, batteries, node, solar) and is aggregated per node. To handle huge amounts of
data, it is read in batches. Each file is labeled with the type, node id and batch number
(measurement_type_nodeID_batchnumber). Batches can be concatenated and stored per node. The data is
stored as dataframes
"""
def __init__(self, data_name: str, source: str, batteries: str = None, solar: str = None,
node: str = None, loads: str = None, results: str = None, stats: str = None,
number_of_batches: int = 30, files_per_batch: int = 30,
concat_batches_start: int = 0, concat_batches_end: int = 1):
"""Creates a Data object for measurement data.
It sets up the folder structure for later use. Paths can be adjusted to user needs or used
with defaults. If any of the destination folders is not provided, a default will be created
at ./data/FOLDERNAME
Batches can be used to separate data into smaller units. The user can choose which batches
they wish to concatenate after cleaning later on.
Args:
data_name (str): Human readable short hand name of data set.
source (str): Path to the data source (a FileNotFoundError is raised if not provided or missing).
batteries (str, None): Path to where batteries data is stored (destination).
solar (str, None): Path to where solar data is stored (destination).
node (str, None): Path to where node data is stored (destination).
loads (str, None): Path to where loads data is stored (destination).
results (str, None): Path to where concatenated and cleaned data is stored
(destination).
            stats (str, None): Path to where statistics are stored
(destination).
number_of_batches (int, 30): Integer with the number of batches that should be used.
files_per_batch (int, 30): The number of files in each batch.
concat_batches_start (int, 0): Integer that determines with which batch concatenation
starts.
concat_batches_end (int, 1): Integer that determines with which batch concatenation
ends.
"""
self.batch_info = {}
self.data_dir = {}
if source is None or not os.path.isdir(source):
raise FileNotFoundError
if batteries is None:
batteries = "data/batteries"
if solar is None:
solar = "data/solar"
if loads is None:
loads = "data/loads"
if node is None:
node = "data/node"
if results is None:
results = "data/results"
if stats is None:
stats = "data/stats"
self.data_dir["source"] = source
self.data_dir["batteries"] = batteries
self.data_dir["solar"] = solar
self.data_dir["loads"] = loads
self.data_dir["node"] = node
self.data_dir["results"] = results
self.data_dir["stats"] = stats
for key in self.data_dir.keys():
if key != "source":
try:
os.makedirs(self.data_dir[key])
except OSError:
print("Creation of the directories failed (folders already there?)", self.data_dir[key])
self.data_name = data_name
self.data_files = os.listdir(self.data_dir["source"])
self.data_files = sorted([f for f in self.data_files if f.endswith('.csv')])
if len(self.data_files) < files_per_batch:
print("Warning: not enough files for batching, set to appropriate values")
files_per_batch = 1
concat_batches_start = 0
concat_batches_end = 1
self.batch_info["number_of_batches"] = number_of_batches
self.batch_info["concat_batches_start"] = concat_batches_start
self.batch_info["concat_batches_end"] = concat_batches_end
self.batch_info["files_per_batch"] = files_per_batch
self.node_measurement_dict = {}
if data_name == 'NextGen':
batches = numpy.arange(self.batch_info["number_of_batches"])
for batch in batches:
self.load_nextgen_processing(batch)
def load_nextgen_processing(self, batch: int = 0):
"""Loads NextGen data provided by Reposit.
        The function can only read CSV files in a specific format. Dataframes are created and stored
to disk, sorted via node_id. All measurements are saved as separate data files per batch so
that they can be processed and filtered later on according to user requirements.
All Data is stored to a "data" folder which needs to contain sub folders (batteries, solar,
node and loads) for each measurement type. Each file is identified by a batch number.
Note: timestamps refer to the start of measurement period in UTC.
Note: Data is generally in kW, kVA etc. See Readme.md for details.
Args:
            batch (int, 0): Batch number that this processing run works on.
"""
batch_start = batch * self.batch_info["files_per_batch"]
batch_end = (batch + 1) * self.batch_info["files_per_batch"]
data_files = self.data_files[batch_start:batch_end]
raw_df = pandas.concat([pandas.read_csv(os.path.join(self.data_dir["source"], f))
for f in data_files])
raw_df = raw_df.rename(columns={'major': 'utc'})
raw_df = raw_df.set_index('utc')
# Note the time intervals of the raw data, even though it's 1.
info_df = pandas.read_json(os.path.join(self.data_dir["source"], "deployment_info.json"))
info_df = info_df.set_index('id')
info_df.to_pickle(self.data_dir["results"] + "/node_info.npy")
node_names = sorted(raw_df.identifier.unique())
for id_name in node_names:
node_df = raw_df.loc[raw_df['identifier'] == id_name]
# create measurement data for loads
power = pandas.DataFrame(-node_df['solarPower']
- node_df['batteryPower']
+ node_df['meterPower'])
reactive_power = pandas.DataFrame(node_df['meterReactivePower']
- node_df['batteryReactivePower'])
loads_df = pandas.DataFrame(pandas.concat([power, reactive_power], axis=1))
            loads_df = loads_df.set_index(node_df.index)
# create measurements for solar
solar_df = pandas.DataFrame(node_df['solarPower'])
            solar_df = solar_df.set_index(node_df.index)
# create a measurements for battery
batt_p_q_c = pandas.concat([node_df['batteryPower'],
node_df['batteryReactivePower'],
node_df['remainingCharge'].apply(lambda x: x / 1000)],
axis=1)
            batt_p_q_c = batt_p_q_c.set_index(node_df.index)
v_f = node_df[['meterVoltage', 'meterFrequency']]
            # save data to different folders so it can be read separately if needed
batt_p_q_c.to_pickle(self.data_dir["batteries"] + "/measurement_batteries_"
+ str(id_name) + "_" + str(batch) + '.npy')
solar_df.to_pickle(self.data_dir["solar"] + "/measurement_solar_"
+ str(id_name) + "_" + str(batch) + '.npy')
loads_df.to_pickle(self.data_dir["loads"] + "/measurement_loads_"
+ str(id_name) + "_" + str(batch) + '.npy')
v_f.to_pickle(self.data_dir["node"] + "/measurement_node_"
+ str(id_name) + "_" + str(batch) + '.npy')
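    # On-disk layout produced by load_nextgen_processing (default folders): one pickled
    # DataFrame per node and batch, despite the .npy suffix:
    #   data/loads/measurement_loads_<node_id>_<batch>.npy
    #   data/solar/measurement_solar_<node_id>_<batch>.npy
    #   data/batteries/measurement_batteries_<node_id>_<batch>.npy
    #   data/node/measurement_node_<node_id>_<batch>.npy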
def create_node_list(self, meas_type="batteries") -> list:
""" Creates a list of node_ids for a given type of measurement.
Args:
meas_type (str, "batteries"): The measurement type. Acceptable values are batteries,
solar, loads, node, results.
        Returns:
node_ids (list): list of IDs (empty if no nodes are found).
"""
nodes = os.listdir(self.data_dir[meas_type])
nodes = sorted([f for f in nodes if f.endswith('.npy')])
node_ids = []
for node in nodes:
parts = node.split('_')
if len(parts) > 3:
node_id = node.split('_')[2]
if node_id not in node_ids:
node_ids.append(node_id)
return node_ids
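    # Filename parsing example for create_node_list (node id '17' is illustrative):
    #   "measurement_loads_17_3.npy".split('_') -> ['measurement', 'loads', '17', '3.npy']
    # so parts[2] ('17') is the node id; duplicates across batches are skipped.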
def concat_data(self, meas_type: str = "batteries", concat_batches_start: int = 0,
concat_batches_end: int = 1):
"""Concatenates batches to one dataset per node.
        A start and an end for the concatenation can be chosen by using the batch_info at init.
Concatenated data is saved to a file per node and labeled with the node id.
Args:
meas_type (str, "batteries"): The type of measurement to be concatenated. Acceptable
values are batteries, solar, loads, node.
concat_batches_start (int, 0): The batch number to start the concat at.
concat_batches_end (int, 1): The batch number to end the concat at.
TODO: Test to make sure this works. Currently untested for node data.
"""
batches = numpy.arange(concat_batches_start, concat_batches_end)
if meas_type == "loads":
path = self.data_dir["loads"] + "/measurement_loads_"
elif meas_type == "batteries":
path = self.data_dir["batteries"] + "/measurement_batteries_"
elif meas_type == "solar":
path = self.data_dir["solar"] + "/measurement_solar_"
elif meas_type == "node":
path = self.data_dir["node"] + "/measurement_node_"
node_ids = self.create_node_list(meas_type)
for node in node_ids:
first_run = True
measurement_df = pandas.DataFrame()
for batch in batches:
try:
print("working on batch: ", batch, "node Id: ",
node, "path: ", path + str(node) + '_' + str(batch) + '.npy')
measurement_df_tmp = pandas.read_pickle(path
+ str(node) + '_'
+ str(batch) + '.npy')
os.remove(path + str(node) + '_' + str(batch) + '.npy')
if first_run is True:
measurement_df = pandas.DataFrame(measurement_df_tmp)
else:
measurement_df = pandas.concat([measurement_df, measurement_df_tmp], axis=0)
first_run = False
except FileNotFoundError:
print("FILE NOT FOUND. Data may have been empty. Move on to next file")
if not measurement_df.empty:
measurement_df.to_pickle(path + str(node) + "_node" + '.npy')
else:
print("measurement data for node ", node, "is empty, no file create")
def to_measurement_full_dataset(self):
"""Collects data to build a measuring dictionary.
The data is saved to a node file with one measurement per node.
Returns:
node_network_dict: A network without any topology but with measurements.
"""
node_ids = self.create_node_list(meas_type="batteries")
node_ids_solar = self.create_node_list(meas_type="solar")
node_ids_loads = self.create_node_list(meas_type="loads")
node_ids.extend(x for x in node_ids_solar if x not in node_ids)
node_ids.extend(x for x in node_ids_loads if x not in node_ids)
full_dict = {}
for node_id in node_ids:
meas_dict = {}
try:
measurement_df_loads_temp = pandas.read_pickle(self.data_dir["results"]
+ "/measurement_loads_"
+ str(node_id) + "_node" + '.npy')
measurement_df_loads_temp.columns = ["PLG", "QLG"]
measurement_df_solar_temp = pandas.read_pickle(self.data_dir["results"]
+ "/measurement_solar_"
+ str(node_id) + "_node" + '.npy')
measurement_df_solar_temp.columns = ["PLG"]
measurement_df_batteries_temp = pandas.read_pickle(self.data_dir["results"]
+ "/measurement_batteries_type"
+ str(node_id) + "_node"
+ '.npy')
measurement_df_batteries_temp.columns = ["PLG", "QLG", "RC"]
meas_dict = {"loads_" + node_id: measurement_df_loads_temp,
"solar_" + node_id: measurement_df_solar_temp,
"battery_" + node_id: measurement_df_batteries_temp}
with open(self.data_dir["results"] + "/node_" + str(node_id), 'wb') as handle:
pickle.dump(meas_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
full_dict[node_id] = meas_dict
except FileNotFoundError:
print("node ", node_id, " has insufficient data .. No data added to dictionary")
return full_dict
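    # Shape of the dictionary returned by to_measurement_full_dataset (node id '17'
    # is illustrative):
    #   full_dict = {'17': {'loads_17':   DataFrame with columns [PLG, QLG],
    #                       'solar_17':   DataFrame with columns [PLG],
    #                       'battery_17': DataFrame with columns [PLG, QLG, RC]},
    #                ...}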
def read_clean_data(self, loads=False, solar=False, batteries=False):
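        """Read concatenated per-node result files back into a dictionary.
        The loads/solar/batteries flags select which measurement types are read from
        the results folder. (Docstring added for clarity; inferred from the code below.)
        """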
full_dict = {}
node_ids = self.create_node_list(meas_type="results")
for node in node_ids:
try:
measurement_df_loads_temp = pandas.DataFrame()
measurement_df_solar_temp = pandas.DataFrame()
measurement_df_batteries_temp = pandas.DataFrame()
                if loads:
measurement_df_loads_temp = pandas.read_pickle(self.data_dir["results"]
+ "/measurement_loads_"
+ str(node) + "_node" + '.npy')
measurement_df_loads_temp.columns = ["PLG", "QLG"]
measurement_df_loads_temp.index = | pandas.to_datetime(measurement_df_loads_temp.index, unit='s') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""Test evaluator."""
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from sktime.benchmarking.evaluation import Evaluator
from sktime.benchmarking.metrics import PairwiseMetric
from sktime.benchmarking.results import RAMResults
from sktime.series_as_features.model_selection import PresplitFilesCV
def dummy_results():
"""Results that are dummy."""
results = RAMResults()
results.cv = PresplitFilesCV()
results.save_predictions(
strategy_name="alg1",
dataset_name="dataset1",
index=np.array([1, 2, 3, 4]),
y_true=np.array([1, 1, 1, 1]),
y_pred=np.array([1, 1, 1, 1]),
y_proba=None,
cv_fold=0,
train_or_test="test",
fit_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
fit_estimator_end_time=pd.to_datetime(1605268801, unit="ms"),
predict_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
predict_estimator_end_time=pd.to_datetime(1605268801, unit="ms"),
)
results.save_predictions(
strategy_name="alg1",
dataset_name="dataset2",
index=np.array([1, 2, 3, 4]),
y_true=np.array([0, 0, 0, 0]),
y_pred=np.array([0, 0, 0, 0]),
y_proba=None,
cv_fold=0,
train_or_test="test",
fit_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
fit_estimator_end_time=pd.to_datetime(1605268801, unit="ms"),
predict_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
predict_estimator_end_time=pd.to_datetime(1605268801, unit="ms"),
)
results.save_predictions(
strategy_name="alg2",
dataset_name="dataset1",
index=np.array([1, 2, 3, 4]),
y_true=np.array([1, 1, 1, 1]),
y_pred=np.array([0, 0, 0, 0]),
y_proba=None,
cv_fold=0,
train_or_test="test",
fit_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
fit_estimator_end_time=pd.to_datetime(1605268801, unit="ms"),
predict_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
predict_estimator_end_time=pd.to_datetime(1605268801, unit="ms"),
)
results.save_predictions(
strategy_name="alg2",
dataset_name="dataset2",
index=np.array([1, 2, 3, 4]),
y_true=np.array([0, 0, 0, 0]),
y_pred=np.array([1, 1, 1, 1]),
y_proba=None,
cv_fold=0,
train_or_test="test",
fit_estimator_start_time= | pd.to_datetime(1605268800, unit="ms") | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
.. module:: skimpy
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2020 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pytfa.io.json import load_json_model
import numpy as np
import pandas as pd
from skimpy.core.modifiers import *
from skimpy.io.yaml import load_yaml_model
from skimpy.core.reactor import Reactor
from skimpy.analysis.oracle.load_pytfa_solution import load_concentrations, load_fluxes
from skimpy.viz.plotting import timetrace_plot
from skimpy.core.parameters import load_parameter_population
from skimpy.simulations.reactor import make_batch_reactor
from skimpy.core.solution import ODESolutionPopulation
from skimpy.utils.namespace import *
from skimpy.viz.escher import animate_fluxes, plot_fluxes
WITH_ANIMATION = False # This can take some time
"""
Set up batch reactor
"""
reactor = make_batch_reactor('single_species.yaml')
reactor.compile_ode(add_dilution=False)
"""
"""
path_to_kmodel = './../../models/kin_varma.yml'
path_to_tmodel = './../../models/tfa_varma.json'
# load models
tmodel = load_json_model(path_to_tmodel)
kmodel = load_yaml_model(path_to_kmodel)
reference_solutions = | pd.read_csv('./../../data/tfa_reference_strains.csv', index_col=0) | pandas.read_csv |
import pandas as pd
import numpy as np
from scipy.linalg import norm, eigh
class PCA():
"""
Get principal components and loadings from a matrix X, such as count matrix.
Get/Set Attributes:
k (int): The number of components to return. Defaults to 10.
        norm_rows (bool): Whether to apply L2 normalization to rows. Defaults to True.
        center_by_mean (bool): Whether to center term vectors by the mean. Defaults to False.
        center_by_variance (bool): Whether to scale term vectors by their standard deviation. Defaults to False.
Generated Attributes:
LOADINGS (pd.DataFrame): A DataFrame of features by principal components.
OCM (pd.DataFrame): A DataFrame of observations by principal components.
COMPS (pd.DataFrame): A DataFrame of information about each component.
"""
k:int=10
norm_rows:bool=True
center_by_mean:bool=False
center_by_variance:bool=False
method:str='standard' # 'svd'
n_top_terms:int=5
def __init__(self, X:pd.DataFrame) -> None:
self.X = X
if self.X.isna().sum().sum():
self.X = self.X.fillna(0)
def compute_pca(self):
self._generate_covariance_matrix()
if self.method == 'standard':
self._compute_by_eigendecomposition()
elif self.method == 'svd':
self._compute_by_svd()
else:
raise ValueError(f"Unknown method {self.method}. Try 'standard' or 'svd'.")
self._get_top_terms()
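    # Hedged usage sketch (the input name `dtm` is illustrative):
    #   pca = PCA(dtm)        # dtm: pandas DataFrame, e.g. documents x terms
    #   pca.k = 5
    #   pca.compute_pca()
    #   pca.OCM               # observations x principal components
    #   pca.LOADINGS          # features x components (see the class docstring)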
def _generate_covariance_matrix(self):
"""
Get the covariance matrix of features from the input matrix X.
Apply norming and centering if wanted. Note that PCA as LSA does
not apply centering by mean or variance.
"""
if self.norm_rows:
self.X = self.X.apply(lambda x: x / norm(x), 1).fillna(0)
if self.center_by_mean:
self.X = self.X - self.X.mean()
if self.center_by_variance:
self.X = self.X / self.X.std()
self.COV = self.X.cov()
def _compute_by_svd(self):
"""
Use SVD to compute objects.
"""
u, d, vt = np.linalg.svd(self.X)
self.OCM = | pd.DataFrame(u[:,:self.k], index=self.X.index) | pandas.DataFrame |
"""Base class for working with mapped arrays.
This class takes the mapped array and the corresponding column and (optionally) index arrays,
and offers features to directly process the mapped array without converting it to pandas;
for example, to compute various statistics by column, such as standard deviation.
## Reducing
Using `MappedArray`, you can then reduce by column as follows:
* Use already provided reducers such as `MappedArray.mean`:
```python-repl
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> import vectorbt as vbt
>>> a = np.array([10., 11., 12., 13., 14., 15., 16., 17., 18.])
>>> col_arr = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> idx_arr = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
>>> wrapper = vbt.ArrayWrapper(index=['x', 'y', 'z'],
... columns=['a', 'b', 'c'], ndim=2, freq='1 day')
>>> ma = vbt.MappedArray(wrapper, a, col_arr, idx_arr=idx_arr)
>>> ma.mean()
a 11.0
b 14.0
c 17.0
dtype: float64
```
* Use `MappedArray.to_pd` to map to pandas and then reduce manually (expensive):
```python-repl
>>> ma.to_pd().mean()
a 11.0
b 14.0
c 17.0
dtype: float64
```
* Use `MappedArray.reduce` to reduce using a custom function:
```python-repl
>>> @njit
... def pow_mean_reduce_nb(col, a, pow):
... return np.mean(a ** pow)
>>> ma.reduce(pow_mean_reduce_nb, 2)
a 121.666667
b 196.666667
c 289.666667
dtype: float64
>>> @njit
... def min_max_reduce_nb(col, a):
... return np.array([np.min(a), np.max(a)])
>>> ma.reduce(min_max_reduce_nb, to_array=True, index=['min', 'max'])
a b c
min 10.0 13.0 16.0
max 12.0 15.0 18.0
>>> @njit
... def idxmin_idxmax_reduce_nb(col, a):
... return np.array([np.argmin(a), np.argmax(a)])
>>> ma.reduce(idxmin_idxmax_reduce_nb, to_array=True,
... to_idx=True, index=['idxmin', 'idxmax'])
a b c
idxmin x x x
idxmax z z z
```
## Conversion
You can expand any `MappedArray` instance to pandas:
* Given `idx_arr` was provided:
```python-repl
>>> ma.to_pd()
a b c
x 10.0 13.0 16.0
y 11.0 14.0 17.0
z 12.0 15.0 18.0
```
!!! note
Will raise an error if there are multiple values pointing to the same position.
* In case `group_by` was provided, index can be ignored, or there are position conflicts:
```python-repl
>>> ma.to_pd(group_by=np.array(['first', 'first', 'second']), ignore_index=True)
first second
0 10.0 16.0
1 11.0 17.0
2 12.0 18.0
3 13.0 NaN
4 14.0 NaN
5 15.0 NaN
```
## Filtering
Use `MappedArray.filter_by_mask` to filter elements per column/group:
```python-repl
>>> mask = [True, False, True, False, True, False, True, False, True]
>>> filtered_ma = ma.filter_by_mask(mask)
>>> filtered_ma.count()
a 2
b 1
c 2
dtype: int64
>>> filtered_ma.id_arr
array([0, 2, 4, 6, 8])
```
## Plotting
You can build histograms and boxplots of `MappedArray` directly:
```python-repl
>>> ma.boxplot()
```

To use scatterplots or any other plots that require index, convert to pandas first:
```python-repl
>>> ma.to_pd().vbt.plot()
```

## Grouping
One of the key features of `MappedArray` is that you can perform reducing operations on a group
of columns as if they were a single column. Groups can be specified by `group_by`, which
can be anything from positions or names of column levels, to a NumPy array with actual groups.
There are multiple ways of define grouping:
* When creating `MappedArray`, pass `group_by` to `vectorbt.base.array_wrapper.ArrayWrapper`:
```python-repl
>>> group_by = np.array(['first', 'first', 'second'])
>>> grouped_wrapper = wrapper.copy(group_by=group_by)
>>> grouped_ma = vbt.MappedArray(grouped_wrapper, a, col_arr, idx_arr=idx_arr)
>>> grouped_ma.mean()
first 12.5
second 17.0
dtype: float64
```
* Regroup an existing `MappedArray`:
```python-repl
>>> ma.regroup(group_by).mean()
first 12.5
second 17.0
dtype: float64
```
* Pass `group_by` directly to the reducing method:
```python-repl
>>> ma.mean(group_by=group_by)
first 12.5
second 17.0
dtype: float64
```
By the same way you can disable or modify any existing grouping:
```python-repl
>>> grouped_ma.mean(group_by=False)
a 11.0
b 14.0
c 17.0
dtype: float64
```
!!! note
Grouping applies only to reducing operations, there is no change to the arrays.
## Operators
`MappedArray` implements arithmetic, comparison and logical operators. You can perform basic
operations (such as addition) on mapped arrays as if they were NumPy arrays.
```python-repl
>>> ma ** 2
<vectorbt.records.mapped_array.MappedArray at 0x7f97bfc49358>
>>> ma * np.array([1, 2, 3, 4, 5, 6])
<vectorbt.records.mapped_array.MappedArray at 0x7f97bfc65e80>
>>> ma + ma
<vectorbt.records.mapped_array.MappedArray at 0x7fd638004d30>
```
!!! note
You should ensure that your `MappedArray` operand is on the left if the other operand is an array.
If two `MappedArray` operands have different metadata, will copy metadata from the first one,
but at least their `id_arr` and `col_arr` must match.
## Indexing
Like any other class subclassing `vectorbt.base.array_wrapper.Wrapping`, we can do pandas indexing
on a `MappedArray` instance, which forwards indexing operation to each object with columns:
```python-repl
>>> ma['a'].values
array([10., 11., 12.])
>>> grouped_ma['first'].values
array([10., 11., 12., 13., 14., 15.])
```
!!! note
Changing index (time axis) is not supported. The object should be treated as a Series
rather than a DataFrame; for example, use `some_field.iloc[0]` instead of `some_field.iloc[:, 0]`.
Indexing behavior depends solely upon `vectorbt.base.array_wrapper.ArrayWrapper`.
For example, if `group_select` is enabled indexing will be performed on groups,
otherwise on single columns.
## Caching
`MappedArray` supports caching. If a method or a property requires heavy computation, it's wrapped
with `vectorbt.utils.decorators.cached_method` and `vectorbt.utils.decorators.cached_property`
respectively. Caching can be disabled globally via `caching` in `vectorbt._settings.settings`.
!!! note
Because of caching, class is meant to be immutable and all properties are read-only.
To change any attribute, use the `copy` method and pass the attribute as keyword argument.
## Saving and loading
Like any other class subclassing `vectorbt.utils.config.Pickleable`, we can save a `MappedArray`
instance to the disk with `MappedArray.save` and load it with `MappedArray.load`.
"""
import numpy as np
import pandas as pd
from vectorbt import _typing as tp
from vectorbt.utils import checks
from vectorbt.utils.decorators import cached_method
from vectorbt.utils.enum import enum_to_value_map
from vectorbt.utils.config import merge_dicts
from vectorbt.base.reshape_fns import to_1d
from vectorbt.base.class_helpers import (
add_binary_magic_methods,
add_unary_magic_methods,
binary_magic_methods,
unary_magic_methods
)
from vectorbt.base.array_wrapper import ArrayWrapper, Wrapping
from vectorbt.generic import nb as generic_nb
from vectorbt.records import nb
from vectorbt.records.col_mapper import ColumnMapper
MappedArrayT = tp.TypeVar("MappedArrayT", bound="MappedArray")
IndexingMetaT = tp.Tuple[
ArrayWrapper,
tp.Array1d,
tp.Array1d,
tp.Array1d,
tp.Optional[tp.Array1d],
tp.MaybeArray,
tp.Array1d
]
def combine_mapped_with_other(self: MappedArrayT, other: tp.Union["MappedArray", tp.ArrayLike],
np_func: tp.Callable[[tp.ArrayLike, tp.ArrayLike], tp.Array1d]) -> MappedArrayT:
"""Combine `MappedArray` with other compatible object.
If other object is also `MappedArray`, their `id_arr` and `col_arr` must match."""
if isinstance(other, MappedArray):
checks.assert_array_equal(self.id_arr, other.id_arr)
checks.assert_array_equal(self.col_arr, other.col_arr)
other = other.values
return self.copy(mapped_arr=np_func(self.values, other))
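# Note on the operator plumbing: `ma + other` is routed through combine_mapped_with_other
# with np.add, i.e. (ma + 2).values == np.add(ma.values, 2). Combining two MappedArray
# instances first asserts that their id_arr and col_arr match, then operates on the raw
# .values arrays and copies the remaining metadata from the left operand.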
@add_binary_magic_methods(
binary_magic_methods,
combine_mapped_with_other
)
@add_unary_magic_methods(
unary_magic_methods,
lambda self, np_func: self.copy(mapped_arr=np_func(self.values))
)
class MappedArray(Wrapping):
"""Exposes methods for reducing, converting, and plotting arrays mapped by
`vectorbt.records.base.Records` class.
Args:
wrapper (ArrayWrapper): Array wrapper.
See `vectorbt.base.array_wrapper.ArrayWrapper`.
mapped_arr (array_like): A one-dimensional array of mapped record values.
col_arr (array_like): A one-dimensional column array.
Must be of the same size as `mapped_arr`.
id_arr (array_like): A one-dimensional id array. Defaults to simple range.
Must be of the same size as `mapped_arr`.
idx_arr (array_like): A one-dimensional index array. Optional.
Must be of the same size as `mapped_arr`.
value_map (namedtuple, dict or callable): Value map.
**kwargs: Custom keyword arguments passed to the config.
Useful if any subclass wants to extend the config.
"""
def __init__(self, wrapper: ArrayWrapper, mapped_arr: tp.ArrayLike, col_arr: tp.ArrayLike,
id_arr: tp.Optional[tp.ArrayLike] = None, idx_arr: tp.Optional[tp.ArrayLike] = None,
value_map: tp.Optional[tp.ValueMapLike] = None, **kwargs) -> None:
Wrapping.__init__(
self,
wrapper,
mapped_arr=mapped_arr,
col_arr=col_arr,
id_arr=id_arr,
idx_arr=idx_arr,
value_map=value_map,
**kwargs
)
mapped_arr = np.asarray(mapped_arr)
col_arr = np.asarray(col_arr)
checks.assert_shape_equal(mapped_arr, col_arr, axis=0)
if id_arr is None:
id_arr = np.arange(len(mapped_arr))
else:
id_arr = np.asarray(id_arr)
if idx_arr is not None:
idx_arr = np.asarray(idx_arr)
checks.assert_shape_equal(mapped_arr, idx_arr, axis=0)
if value_map is not None:
if checks.is_namedtuple(value_map):
value_map = enum_to_value_map(value_map)
self._mapped_arr = mapped_arr
self._id_arr = id_arr
self._col_arr = col_arr
self._idx_arr = idx_arr
self._value_map = value_map
self._col_mapper = ColumnMapper(wrapper, col_arr)
def indexing_func_meta(self, pd_indexing_func: tp.PandasIndexingFunc, **kwargs) -> IndexingMetaT:
"""Perform indexing on `MappedArray` and return metadata."""
new_wrapper, _, group_idxs, col_idxs = \
self.wrapper.indexing_func_meta(pd_indexing_func, column_only_select=True, **kwargs)
new_indices, new_col_arr = self.col_mapper._col_idxs_meta(col_idxs)
new_mapped_arr = self.values[new_indices]
new_id_arr = self.id_arr[new_indices]
if self.idx_arr is not None:
new_idx_arr = self.idx_arr[new_indices]
else:
new_idx_arr = None
return new_wrapper, new_mapped_arr, new_col_arr, new_id_arr, new_idx_arr, group_idxs, col_idxs
def indexing_func(self: MappedArrayT, pd_indexing_func: tp.PandasIndexingFunc, **kwargs) -> MappedArrayT:
"""Perform indexing on `MappedArray`."""
new_wrapper, new_mapped_arr, new_col_arr, new_id_arr, new_idx_arr, _, _ = \
self.indexing_func_meta(pd_indexing_func, **kwargs)
return self.copy(
wrapper=new_wrapper,
mapped_arr=new_mapped_arr,
col_arr=new_col_arr,
id_arr=new_id_arr,
idx_arr=new_idx_arr
)
@property
def mapped_arr(self) -> tp.Array1d:
"""Mapped array."""
return self._mapped_arr
@property
def values(self) -> tp.Array1d:
"""Mapped array."""
return self.mapped_arr
def __len__(self) -> int:
return len(self.values)
@property
def col_arr(self) -> tp.Array1d:
"""Column array."""
return self._col_arr
@property
def col_mapper(self) -> ColumnMapper:
"""Column mapper.
See `vectorbt.records.col_mapper.ColumnMapper`."""
return self._col_mapper
@property
def id_arr(self) -> tp.Array1d:
"""Id array."""
return self._id_arr
@property
def idx_arr(self) -> tp.Optional[tp.Array1d]:
"""Index array."""
return self._idx_arr
@property
def value_map(self) -> tp.Optional[tp.ValueMap]:
"""Value map."""
return self._value_map
@cached_method
def is_sorted(self, incl_id: bool = False) -> bool:
"""Check whether mapped array is sorted."""
if incl_id:
return nb.is_col_idx_sorted_nb(self.col_arr, self.id_arr)
return nb.is_col_sorted_nb(self.col_arr)
def sort(self: MappedArrayT, incl_id: bool = False, idx_arr: tp.Optional[tp.Array1d] = None,
group_by: tp.GroupByLike = None, **kwargs) -> MappedArrayT:
"""Sort mapped array by column array (primary) and id array (secondary, optional)."""
if idx_arr is None:
idx_arr = self.idx_arr
if self.is_sorted(incl_id=incl_id):
return self.copy(idx_arr=idx_arr, **kwargs).regroup(group_by)
if incl_id:
ind = np.lexsort((self.id_arr, self.col_arr)) # expensive!
else:
ind = np.argsort(self.col_arr)
return self.copy(
mapped_arr=self.values[ind],
col_arr=self.col_arr[ind],
id_arr=self.id_arr[ind],
idx_arr=idx_arr[ind] if idx_arr is not None else None,
**kwargs
).regroup(group_by)
def filter_by_mask(self: MappedArrayT, mask: tp.Array1d, idx_arr: tp.Optional[tp.Array1d] = None,
group_by: tp.GroupByLike = None, **kwargs) -> MappedArrayT:
"""Return a new class instance, filtered by mask."""
if idx_arr is None:
idx_arr = self.idx_arr
return self.copy(
mapped_arr=self.values[mask],
col_arr=self.col_arr[mask],
id_arr=self.id_arr[mask],
idx_arr=idx_arr[mask] if idx_arr is not None else None,
**kwargs
).regroup(group_by)
def map_to_mask(self, inout_map_func_nb: tp.MaskInOutMapFunc, *args,
group_by: tp.GroupByLike = None) -> tp.Array1d:
"""Map mapped array to a mask.
See `vectorbt.records.nb.mapped_to_mask_nb`."""
col_map = self.col_mapper.get_col_map(group_by=group_by)
return nb.mapped_to_mask_nb(self.values, col_map, inout_map_func_nb, *args)
@cached_method
def top_n_mask(self, n: int, **kwargs) -> tp.Array1d:
"""Return mask of top N elements in each column."""
return self.map_to_mask(nb.top_n_inout_map_nb, n, **kwargs)
@cached_method
def bottom_n_mask(self, n: int, **kwargs) -> tp.Array1d:
"""Return mask of bottom N elements in each column."""
return self.map_to_mask(nb.bottom_n_inout_map_nb, n, **kwargs)
@cached_method
def top_n(self: MappedArrayT, n: int, **kwargs) -> MappedArrayT:
"""Filter top N elements from each column."""
return self.filter_by_mask(self.top_n_mask(n), **kwargs)
@cached_method
def bottom_n(self: MappedArrayT, n: int, **kwargs) -> MappedArrayT:
"""Filter bottom N elements from each column."""
return self.filter_by_mask(self.bottom_n_mask(n), **kwargs)
@cached_method
def is_expandable(self, idx_arr: tp.Optional[tp.Array1d] = None, group_by: tp.GroupByLike = None) -> bool:
"""See `vectorbt.records.nb.is_mapped_expandable_nb`."""
if idx_arr is None:
if self.idx_arr is None:
raise ValueError("Must pass idx_arr")
idx_arr = self.idx_arr
col_arr = self.col_mapper.get_col_arr(group_by=group_by)
target_shape = self.wrapper.get_shape_2d(group_by=group_by)
return nb.is_mapped_expandable_nb(col_arr, idx_arr, target_shape)
def to_pd(self, idx_arr: tp.Optional[tp.Array1d] = None, ignore_index: bool = False,
default_val: float = np.nan, group_by: tp.GroupByLike = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""Expand mapped array to a Series/DataFrame.
If `ignore_index`, will ignore the index and stack data points on top of each other in every column
(see `vectorbt.records.nb.stack_expand_mapped_nb`). Otherwise, see `vectorbt.records.nb.expand_mapped_nb`.
!!! note
Will raise an error if there are multiple values pointing to the same position.
Set `ignore_index` to True in this case.
!!! warning
Mapped arrays represent information in the most memory-friendly format.
Mapping back to pandas may occupy lots of memory if records are sparse."""
if ignore_index:
if self.wrapper.ndim == 1:
return self.wrapper.wrap(
self.values,
index=np.arange(len(self.values)),
group_by=group_by,
**merge_dicts({}, wrap_kwargs)
)
col_map = self.col_mapper.get_col_map(group_by=group_by)
out = nb.stack_expand_mapped_nb(self.values, col_map, default_val)
return self.wrapper.wrap(
out, index=np.arange(out.shape[0]),
group_by=group_by, **merge_dicts({}, wrap_kwargs))
if idx_arr is None:
if self.idx_arr is None:
raise ValueError("Must pass idx_arr")
idx_arr = self.idx_arr
if not self.is_expandable(idx_arr=idx_arr, group_by=group_by):
raise ValueError("Multiple values are pointing to the same position. Use ignore_index.")
col_arr = self.col_mapper.get_col_arr(group_by=group_by)
target_shape = self.wrapper.get_shape_2d(group_by=group_by)
out = nb.expand_mapped_nb(self.values, col_arr, idx_arr, target_shape, default_val)
return self.wrapper.wrap(out, group_by=group_by, **merge_dicts({}, wrap_kwargs))
def reduce(self, reduce_func_nb: tp.ReduceFunc, *args, idx_arr: tp.Optional[tp.Array1d] = None,
to_array: bool = False, to_idx: bool = False, idx_labeled: bool = True,
default_val: float = np.nan, group_by: tp.GroupByLike = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeriesFrame:
"""Reduce mapped array by column.
If `to_array` is False and `to_idx` is False, see `vectorbt.records.nb.reduce_mapped_nb`.
If `to_array` is False and `to_idx` is True, see `vectorbt.records.nb.reduce_mapped_to_idx_nb`.
If `to_array` is True and `to_idx` is False, see `vectorbt.records.nb.reduce_mapped_to_array_nb`.
If `to_array` is True and `to_idx` is True, see `vectorbt.records.nb.reduce_mapped_to_idx_array_nb`.
If `to_idx` is True, must pass `idx_arr`. Set `idx_labeled` to False to return raw positions instead
of labels. Use `default_val` to set the default value. Set `group_by` to False to disable grouping.
"""
# Perform checks
checks.assert_numba_func(reduce_func_nb)
if idx_arr is None:
if self.idx_arr is None:
if to_idx:
raise ValueError("Must pass idx_arr")
idx_arr = self.idx_arr
# Perform main computation
col_map = self.col_mapper.get_col_map(group_by=group_by)
if not to_array:
if not to_idx:
out = nb.reduce_mapped_nb(
self.values,
col_map,
default_val,
reduce_func_nb,
*args
)
else:
out = nb.reduce_mapped_to_idx_nb(
self.values,
col_map,
idx_arr,
default_val,
reduce_func_nb,
*args
)
else:
if not to_idx:
out = nb.reduce_mapped_to_array_nb(
self.values,
col_map,
default_val,
reduce_func_nb,
*args
)
else:
out = nb.reduce_mapped_to_idx_array_nb(
self.values,
col_map,
idx_arr,
default_val,
reduce_func_nb,
*args
)
# Perform post-processing
if to_idx:
nan_mask = np.isnan(out)
if idx_labeled:
out = out.astype(object)
out[~nan_mask] = self.wrapper.index[out[~nan_mask].astype(np.int_)]
else:
out[nan_mask] = -1
out = out.astype(np.int_)
wrap_kwargs = merge_dicts(dict(name_or_index='reduce' if not to_array else None), wrap_kwargs)
return self.wrapper.wrap_reduced(out, group_by=group_by, **wrap_kwargs)
@cached_method
def nst(self, n: int, **kwargs) -> tp.MaybeSeries:
"""Return nst element of each column."""
kwargs = merge_dicts(dict(wrap_kwargs=dict(name_or_index='nst')), kwargs)
return self.reduce(generic_nb.nst_reduce_nb, n, to_array=False, to_idx=False, **kwargs)
@cached_method
def min(self, **kwargs) -> tp.MaybeSeries:
"""Return min by column."""
kwargs = merge_dicts(dict(wrap_kwargs=dict(name_or_index='min')), kwargs)
return self.reduce(generic_nb.min_reduce_nb, to_array=False, to_idx=False, **kwargs)
@cached_method
def max(self, **kwargs) -> tp.MaybeSeries:
"""Return max by column."""
kwargs = merge_dicts(dict(wrap_kwargs=dict(name_or_index='max')), kwargs)
return self.reduce(generic_nb.max_reduce_nb, to_array=False, to_idx=False, **kwargs)
@cached_method
def mean(self, **kwargs) -> tp.MaybeSeries:
"""Return mean by column."""
kwargs = merge_dicts(dict(wrap_kwargs=dict(name_or_index='mean')), kwargs)
return self.reduce(generic_nb.mean_reduce_nb, to_array=False, to_idx=False, **kwargs)
@cached_method
def median(self, **kwargs) -> tp.MaybeSeries:
"""Return median by column."""
kwargs = merge_dicts(dict(wrap_kwargs=dict(name_or_index='median')), kwargs)
return self.reduce(generic_nb.median_reduce_nb, to_array=False, to_idx=False, **kwargs)
@cached_method
def std(self, ddof: int = 1, **kwargs) -> tp.MaybeSeries:
"""Return std by column."""
kwargs = merge_dicts(dict(wrap_kwargs=dict(name_or_index='std')), kwargs)
return self.reduce(generic_nb.std_reduce_nb, ddof, to_array=False, to_idx=False, **kwargs)
@cached_method
def sum(self, default_val: float = 0., **kwargs) -> tp.MaybeSeries:
"""Return sum by column."""
kwargs = merge_dicts(dict(wrap_kwargs=dict(name_or_index='sum')), kwargs)
return self.reduce(
generic_nb.sum_reduce_nb,
to_array=False,
to_idx=False,
default_val=default_val,
**kwargs
)
@cached_method
def idxmin(self, **kwargs) -> tp.MaybeSeries:
"""Return index of min by column."""
kwargs = merge_dicts(dict(wrap_kwargs=dict(name_or_index='idxmin')), kwargs)
return self.reduce(generic_nb.argmin_reduce_nb, to_array=False, to_idx=True, **kwargs)
@cached_method
def idxmax(self, **kwargs) -> tp.MaybeSeries:
"""Return index of max by column."""
kwargs = merge_dicts(dict(wrap_kwargs=dict(name_or_index='idxmax')), kwargs)
return self.reduce(generic_nb.argmax_reduce_nb, to_array=False, to_idx=True, **kwargs)
@cached_method
def describe(self, percentiles: tp.Optional[tp.ArrayLike] = None, ddof: int = 1, **kwargs) -> tp.SeriesFrame:
"""Return statistics by column."""
if percentiles is not None:
percentiles = to_1d(percentiles, raw=True)
else:
percentiles = np.array([0.25, 0.5, 0.75])
percentiles = percentiles.tolist()
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.unique(percentiles)
perc_formatted = pd.io.formats.format.format_percentiles(percentiles)
index = pd.Index(['count', 'mean', 'std', 'min', *perc_formatted, 'max'])
kwargs = merge_dicts(dict(wrap_kwargs=dict(name_or_index=index)), kwargs)
out = self.reduce(
generic_nb.describe_reduce_nb,
percentiles,
ddof,
to_array=True,
to_idx=False,
**kwargs
)
if isinstance(out, pd.DataFrame):
out.loc['count'].fillna(0., inplace=True)
else:
if np.isnan(out.loc['count']):
out.loc['count'] = 0.
return out
@cached_method
def count(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Return count by column."""
wrap_kwargs = merge_dicts(dict(name_or_index='count'), wrap_kwargs)
return self.wrapper.wrap_reduced(
self.col_mapper.get_col_map(group_by=group_by)[1],
group_by=group_by, **wrap_kwargs)
@cached_method
def value_counts(self, group_by: tp.GroupByLike = None, value_map: tp.Optional[tp.ValueMapLike] = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""Return a pandas object containing counts of unique values."""
mapped_codes, mapped_uniques = | pd.factorize(self.values) | pandas.factorize |
# Copyright (C) 2014-2017 <NAME>, <NAME>, <NAME>, <NAME> (in alphabetic order)
#
# This file is part of OpenModal.
#
# OpenModal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# OpenModal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenModal. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 20. maj 2014
TODO: That mnums in the beginning of every function, that's bad!
TODO: A lot of refactoring.
TODO: Put tables in a dictionary, that way you have a nice overview
of what is inside and also is much better :)
@author: Matjaz
'''
import time
import os
import itertools
from datetime import datetime
import pandas as pd
from pandas import ExcelWriter
from OpenModal.anim_tools import zyx_euler_to_rotation_matrix
import numpy as np
import pyuff
import OpenModal.utils as ut
# import _transformations as tr
# Uff fields definitions (human-readable).
types = dict()
types[15] = 'Geometry'
types[82] = 'Lines'
types[151] = 'Header'
types[2411] = 'Geometry'
types[164] = 'Units'
types[58] = 'Measurement'
types[55] = 'Analysis'
types[2420] = 'Coor. sys.'
types[18] = 'Coor. sys.'
# Function type definition.
FUNCTION_TYPE = dict()
FUNCTION_TYPE['General'] = 0 # also: unknown
FUNCTION_TYPE['Time Response'] = 1
FUNCTION_TYPE['Auto Spectrum'] = 2
FUNCTION_TYPE['Cross Spectrum'] = 3
FUNCTION_TYPE['Frequency Response Function'] = 4
FUNCTION_TYPE['Transmissibility'] = 5
FUNCTION_TYPE['Coherence'] = 6
FUNCTION_TYPE['Auto Correlation'] = 7
FUNCTION_TYPE['Cross Correlation'] = 8
FUNCTION_TYPE['Power Spectral Density (PSD)'] = 9
FUNCTION_TYPE['Energy Spectral Density (ESD)'] = 10
FUNCTION_TYPE['Probability Density Function'] = 11
FUNCTION_TYPE['Spectrum'] = 12
FUNCTION_TYPE['Cumulative Frequency Distribution'] = 13
FUNCTION_TYPE['Peaks Valley'] = 14
FUNCTION_TYPE['Stress/Cycles'] = 15
FUNCTION_TYPE['Strain/Cycles'] = 16
FUNCTION_TYPE['Orbit'] = 17
FUNCTION_TYPE['Mode Indicator Function'] = 18
FUNCTION_TYPE['Force Pattern'] = 19
FUNCTION_TYPE['Partial Power'] = 20
FUNCTION_TYPE['Partial Coherence'] = 21
FUNCTION_TYPE['Eigenvalue'] = 22
FUNCTION_TYPE['Eigenvector'] = 23
FUNCTION_TYPE['Shock Response Spectrum'] = 24
FUNCTION_TYPE['Finite Impulse Response Filter'] = 25
FUNCTION_TYPE['Multiple Coherence'] = 26
FUNCTION_TYPE['Order Function'] = 27
FUNCTION_TYPE['Phase Compensation'] = 28
# Specific data type for abscisa/ordinate
SPECIFIC_DATA_TYPE = dict()
SPECIFIC_DATA_TYPE['unknown'] = 0
SPECIFIC_DATA_TYPE['general'] = 1
SPECIFIC_DATA_TYPE['stress'] = 2
SPECIFIC_DATA_TYPE['strain'] = 3
SPECIFIC_DATA_TYPE['temperature'] = 5
SPECIFIC_DATA_TYPE['heat flux'] = 6
SPECIFIC_DATA_TYPE['displacement'] = 8
SPECIFIC_DATA_TYPE['reaction force'] = 9
SPECIFIC_DATA_TYPE['velocity'] = 11
SPECIFIC_DATA_TYPE['acceleration'] = 12
SPECIFIC_DATA_TYPE['excitation force'] = 13
SPECIFIC_DATA_TYPE['pressure'] = 15
SPECIFIC_DATA_TYPE['mass'] = 16
SPECIFIC_DATA_TYPE['time'] = 17
SPECIFIC_DATA_TYPE['frequency'] = 18
SPECIFIC_DATA_TYPE['rpm'] = 19
SPECIFIC_DATA_TYPE['order'] = 20
SPECIFIC_DATA_TYPE['sound pressure'] = 21
SPECIFIC_DATA_TYPE['sound intensity'] = 22
SPECIFIC_DATA_TYPE['sound power'] = 23
# TODO: Fast get and set. Check setting with enlargement.
class ModalData(object):
"""The data object holds all measurement, results and geometry data
"""
def __init__(self):
"""
Constructor
"""
self.create_empty()
def create_empty(self):
"""Create an empty data container."""
# Tables
self.tables = dict()
# Holds the tables, populated by importing a uff file.
# TODO: This is temporary? Maybe, maybe not, might be
# a good idea to have some reference of imported data!
self.uff_import_tables = dict()
self.create_info_table()
self.create_geometry_table()
self.create_measurement_table()
self.create_analysis_table()
self.create_lines_table()
self.create_elements_table()
# Set model id
self.model_id = 0
def create_info_table(self):
"""Creates an empty info table."""
self.tables['info'] = pd.DataFrame(columns=['model_id', 'model_name', 'description', 'units_code', 'length',
'force', 'temp', 'temp_offset'])
# self.tables['info'] = pd.DataFrame(columns=['model_id', 'uffid', 'value'])
def create_geometry_table(self):
"""Creates an empty geometry table."""
self.tables['geometry'] = pd.DataFrame(columns=['model_id', 'uffid', 'node_nums',
'x', 'y', 'z', 'thx', 'thy', 'thz',
'disp_cs', 'def_cs',
'color','clr_r','clr_g','clr_b','clr_a',
'r','phi','cyl_thz'])
def create_measurement_table(self):
"""Creates an empty measurement table."""
self.tables['measurement_index'] = pd.DataFrame(columns=['model_id', 'measurement_id', 'uffid', 'field_type', 'excitation_type',
'func_type', 'rsp_node', 'rsp_dir', 'ref_node',
'ref_dir', 'abscissa_spec_data_type',
'ordinate_spec_data_type', 'orddenom_spec_data_type', 'zero_padding'], dtype=int)
self.tables['measurement_values'] = pd.DataFrame(columns=['model_id', 'measurement_id', 'frq', 'amp'])
self.tables['measurement_values'].amp = self.tables['measurement_values'].amp.astype('complex')
self.tables['measurement_values_td'] = pd.DataFrame(columns=['model_id', 'measurement_id', 'n_avg', 'x_axis',
'excitation', 'response'])
def create_analysis_table(self):
"""Creates an empty analysis table."""
self.tables['analysis_index'] = pd.DataFrame(columns=['model_id', 'analysis_id', 'analysis_method', 'uffid',
'field_type', 'analysis_type', 'data_ch',
'spec_data_type', 'load_case', 'mode_n', 'eig', 'freq',
'freq_step_n', 'node_nums', 'rsp_node', 'rsp_dir',
                                                              'ref_node', 'ref_dir', 'data_type',
                                                              'eig_real', 'eig_xi', 'spots'])
self.tables['analysis_values'] = pd.DataFrame(columns=['model_id', 'analysis_id', 'analysis_method', 'mode_n',
'node_nums', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6'])
self.tables['analysis_settings'] = pd.DataFrame(columns=['model_id', 'analysis_id', 'analysis_method',
'f_min','f_max', 'nmax', 'err_fn', 'err_xi', ])
self.tables['analysis_stabilisation'] = pd.DataFrame(columns=['model_id', 'analysis_id', 'analysis_method',
'pos', 'size', 'pen_color', 'pen_width',
'symbol', 'brush', 'damp'])
self.tables['analysis_index'].eig = self.tables['analysis_index'].eig.astype('complex')
self.tables['analysis_values'].r1 = self.tables['analysis_values'].r1.astype('complex')
self.tables['analysis_values'].r2 = self.tables['analysis_values'].r2.astype('complex')
self.tables['analysis_values'].r3 = self.tables['analysis_values'].r3.astype('complex')
self.tables['analysis_values'].r4 = self.tables['analysis_values'].r4.astype('complex')
self.tables['analysis_values'].r5 = self.tables['analysis_values'].r5.astype('complex')
self.tables['analysis_values'].r6 = self.tables['analysis_values'].r6.astype('complex')
def create_lines_table(self):
"""Creates an empty lines table."""
        self.tables['lines'] = pd.DataFrame(columns=['model_id', 'uffid', 'id', 'field_type', 'trace_num',
                                                     'color', 'n_nodes', 'trace_id', 'pos', 'node'])
def create_elements_table(self):
"""Creates an empty elements table."""
# TODO: Missing 'physical property table number' and 'material property ...'
# TODO: Missing 'fe descriptor id', chosen from a list of 232(!) types!!?
# TODO: Missing beam support.
self.tables['elements_index'] = pd.DataFrame(columns=['model_id', 'element_id', 'element_descriptor', 'color',
'nr_of_nodes','clr_r','clr_g','clr_b','clr_a'])
self.tables['elements_values'] = pd.DataFrame(columns=['model_id', 'element_id', 'node_id', 'node_pos'])
def new_model(self, model_id=-1, entries=dict()):
"""Set new model id. Values can be set through entries dictionary, for each
value left unset, default will be used."""
if model_id == -1:
# Create a new model_id. First check if table is empty.
current_models = self.tables['info'].model_id
if current_models.size == 0:
model_id = 0
else:
model_id = current_models.max() + 1
fields = {'db_app': 'ModalData', 'time_db_created': time.strftime("%d-%b-%y %H:%M:%S"),
'time_db_saved': time.strftime("%d-%b-%y %H:%M:%S"), 'program': 'OpenModal',
'model_name': 'DefaultName', 'description': 'DefaultDecription', 'units_code': 9,
'temp': 1, 'temp_mode': 1, 'temp_offset': 1, 'length': 1, 'force': 1,
'units_description': 'User unit system'}
for key in entries:
fields[key] = entries[key]
# TODO: Check if model_id already exists.
input = [model_id, fields['model_name'], fields['description'], fields['units_code'], fields['length'],
fields['force'], fields['temp'], fields['temp_offset']]
new_model = pd.DataFrame([input], columns=['model_id', 'model_name', 'description', 'units_code', 'length',
'force', 'temp', 'temp_offset'])
self.tables['info'] = pd.concat([self.tables['info'], new_model], ignore_index=True)
return model_id
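    # Hedged usage sketch (argument values below are illustrative only):
    #   md = ModalData()
    #   model_id = md.new_model(entries={'model_name': 'beam'})
    #   md.new_measurement(model_id, excitation_type='impulse',
    #                      frequency=frq, h=frf,   # frq/frf: 1-D arrays of equal length
    #                      reference=[1, 3], response=[2, 3])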
def new_measurement(self, model_id, excitation_type, frequency, h, reference=[0, 0], response=[0, 0],
function_type='Frequency Response Function', abscissa='frequency', ordinate='acceleration',
denominator='excitation force', zero_padding=0, td_x_axis=np.array([]),
td_excitation=None, td_response=None):
"""Add a new measurement."""
# Check if model id exists.
if self.tables['info'].model_id.size == 0:
raise ValueError
elif not any(self.tables['info'].model_id == model_id):
raise ValueError
# Prepare a new measurement_id.
if self.tables['measurement_index'].measurement_id.size == 0:
measurement_id = 0
else:
measurement_id = self.tables['measurement_index'].measurement_id.max() + 1
newentry_idx = pd.DataFrame([[model_id, measurement_id, excitation_type, FUNCTION_TYPE[function_type], response[0],
response[1], reference[0], reference[1], SPECIFIC_DATA_TYPE[abscissa],
SPECIFIC_DATA_TYPE[ordinate], SPECIFIC_DATA_TYPE[denominator], zero_padding]],
columns=['model_id', 'measurement_id', 'excitation_type', 'func_type', 'rsp_node', 'rsp_dir',
'ref_node', 'ref_dir', 'abscissa_spec_data_type',
'ordinate_spec_data_type', 'orddenom_spec_data_type', 'zero_padding'])
self.tables['measurement_index'] = pd.concat([ self.tables['measurement_index'],
newentry_idx], ignore_index=True)
# Add entry with measured frf.
newentry_val = pd.DataFrame(columns=['model_id', 'measurement_id', 'frq', 'amp'])
newentry_val['frq'] = frequency
newentry_val['amp'] = h
newentry_val['model_id'] = model_id
newentry_val['measurement_id'] = measurement_id
self.tables['measurement_values'] = pd.concat([self.tables['measurement_values'],
newentry_val], ignore_index=True)
# if td_x_axis.size > 0:
# # TODO: Create it with size you already know. Should be faster?
# newentry_val_td = pd.DataFrame(columns=['model_id', 'measurement_id', 'x_axis', 'excitation', 'response'])
# newentry_val_td['x_axis'] = td_x_axis
# newentry_val_td['excitation'] = td_excitation
# newentry_val_td['response'] = td_response
# newentry_val_td['model_id'] = model_id
# newentry_val_td['measurement_id'] = measurement_id
#
# self.tables['measurement_values_td'] = pd.concat([self.tables['measurement_values_td'], newentry_val_td],
# ignore_index=True)
if td_x_axis.size > 0:
n_averages = len(td_response)
i = 0
# TODO: Optimize here.
for td_excitation_i, td_response_i in zip(td_excitation, td_response):
# TODO: Create it with size you already know. Should be faster?
newentry_val_td = pd.DataFrame(columns=['model_id', 'measurement_id', 'n_avg',
'x_axis', 'excitation', 'response'])
newentry_val_td['x_axis'] = td_x_axis
newentry_val_td['excitation'] = td_excitation_i
newentry_val_td['response'] = td_response_i
newentry_val_td['model_id'] = model_id
newentry_val_td['measurement_id'] = measurement_id
newentry_val_td['n_avg'] = i
i += 1
self.tables['measurement_values_td'] = pd.concat([self.tables['measurement_values_td'], newentry_val_td],
ignore_index=True)
def remove_model(self, model_id):
"""Remove all data connected to the supplied model id."""
try:
el_idx = self.tables['elements_index']
el_vals = self.tables['elements_values']
elements_id = el_idx[el_idx.model_id == model_id].element_id
self.tables['elements_values'] = self.tables['elements_values'][~el_vals.element_id.isin(elements_id)]
self.tables['elements_index'] = self.tables['elements_index'][el_idx.model_id != model_id]
except AttributeError:
print('There is no element data to delete.')
try:
lines = self.tables['lines']
self.tables['lines'] = self.tables['lines'][lines.model_id != model_id]
except AttributeError:
print('There is no line data to delete.')
try:
an_idx = self.tables['analysis_index']
an_vals = self.tables['analysis_values']
analysis_id = an_idx[an_idx.model_id == model_id].analysis_id
self.tables['analysis_values'] = self.tables['analysis_values'][~an_vals.element_id.isin(analysis_id)]
self.tables['analysis_index'] = self.tables['analysis_index'][an_idx.model_id != model_id]
except AttributeError:
print('There is no analysis data to delete.')
try:
me_idx = self.tables['measurement_index']
me_vals = self.tables['measurement_values']
me_vals_td = self.tables['measurement_values_td']
measurement_id = me_idx[me_idx.model_id == model_id].measurement_id
self.tables['measurement_values_td'] = self.tables['measurement_values_td'][~me_vals_td.measurement_id.isin(measurement_id)]
self.tables['measurement_values'] = self.tables['measurement_values'][~me_vals.measurement_id.isin(measurement_id)]
self.tables['measurement_index'] = self.tables['measurement_index'][me_idx.model_id != model_id]
except AttributeError:
print('There is no measurement data to delete.')
try:
geometry = self.tables['geometry']
self.tables['geometry'] = self.tables['geometry'][geometry.model_id != model_id]
except AttributeError:
print('There is no geometry data to delete.')
try:
info = self.tables['info']
self.tables['info'] = self.tables['info'][info.model_id != model_id]
except AttributeError:
print('There is no info data to delete.')
def import_uff(self, fname):
"""Pull data from uff."""
# Make sure you start with new model ids at the appropriate index.
if self.tables['info'].model_id.size > 0:
base_key = self.tables['info'].model_id.max() + 1
else:
base_key=0
uffdata = ModalDataUff(fname, base_key=base_key)
for key in self.tables.keys():
if key in uffdata.tables:
# uffdata.tables[key].model_id += 100
self.tables[key] = pd.concat([self.tables[key], uffdata.tables[key]], ignore_index=True)
self.uff_import_tables[key] = ''
self.file_structure = uffdata.file_structure
def export_to_uff(self, fname, model_ids=[], data_types=[], separate_files_flag=False):
"""Export data to uff."""
        if len(model_ids) == 0:
            model_ids = self.tables['info'].model_id.unique()
if len(data_types) == 0:
data_types = ['nodes', 'lines', 'elements', 'measurements', 'analyses']
if len(model_ids) == 0:
print('Warning: Empty tables. (No model_ids found).')
return False
t = datetime.now()
folder_timestamp = 'OpenModal Export UFF -- {:%Y %d-%m %H-%M-%S}'.format(t)
export_folder = os.path.join(fname, folder_timestamp)
try:
os.mkdir(export_folder)
        except OSError:
print('Warning: File exists. Try again later ...')
return False
for model_id in model_ids:
# -- Write info.
dfi = self.tables['info']
dfi = dfi[dfi.model_id == model_id]
# TODO: Do not overwrite this dfi
model_name = dfi.model_name.values[0]
if not separate_files_flag:
uffwrite=pyuff.UFF(os.path.join(export_folder, '{0}_{1:.0f}.uff'.format(model_name, model_id)))
if len(dfi) != 0:
dset_info = {'db_app': 'modaldata v1',
'model_name': dfi.model_name.values[0],
'description': dfi.description.values[0],
'program': 'Open Modal'}
dset_units = {'units_code': dfi.units_code.values[0],
# TODO: Maybe implement other data.
# 'units_description': dfi.units_description,
# 'temp_mode': dfi.temp_mode,
'length': dfi.length.values[0],
'force': dfi.force.values[0],
'temp': dfi.temp.values[0],
'temp_offset': dfi.temp_offset.values[0]}
# for key in dset_info.keys():
# dset_info[key] = dset_info[key].value.values[0]
dset_info['type'] = 151
# for key in dset_units.keys():
# dset_units[key] = dset_units[key].value.values[0]
dset_units['type'] = 164
if separate_files_flag:
uffwrite=pyuff.UFF(os.path.join(export_folder, '{0}_{1:.0f}_info.uff'.format(model_name, model_id)))
uffwrite._write_set(dset_info, mode='add')
uffwrite._write_set(dset_units, mode='add')
# -- Write Geometry.
if 'nodes' in data_types:
dfg = self.tables['geometry']
#dfg = dfg[dfg.model_id==model_id]
#drop nan lines defined in geometry
model_id_mask=dfg.model_id==model_id
nan_mask = dfg[['node_nums','x', 'y', 'z','thz', 'thy', 'thx' , 'model_id']].notnull().all(axis=1)
comb_mask = model_id_mask & nan_mask
dfg = dfg[comb_mask]
if len(dfg) != 0:
# .. First the coordinate systems. Mind the order of angles (ZYX)
size = len(dfg)
local_cs = np.zeros((size * 4, 3), dtype=float)
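                    # local_cs is stacked in 4-row blocks per node: rows i*4 .. i*4+2 hold
                    # the 3x3 rotation matrix built from the (thz, thy, thx) Euler angles
                    # (converted from degrees to radians below); row i*4+3 is left as zeros.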
th_angles = dfg[['thz', 'thy', 'thx']].values
for i in range(size):
#local_cs[i*4:i*4+3, :] = ut.zyx_euler_to_rotation_matrix(th_angles[i, :])
local_cs[i*4:i*4+3, :] = zyx_euler_to_rotation_matrix(th_angles[i, :]*np.pi/180.)
local_cs[i*4+3, :] = 0.0
dset_cs = {'local_cs': local_cs, 'nodes': dfg[['node_nums']].values, 'type': 2420}
uffwrite._write_set(dset_cs, mode='add')
# .. Then points.
dset_geometry = {'grid_global': dfg[['node_nums', 'x', 'y', 'z']].values,
'export_cs_number': 0,
'cs_color': 8,
'type': 2411}
if separate_files_flag:
uffwrite=pyuff.UFF(os.path.join(export_folder, '{0}_{1:.0f}_nodes.uff'.format(model_name, model_id)))
uffwrite._write_set(dset_geometry, mode='add')
# -- Write Measurements.
if 'measurements' in data_types:
dfi = self.tables['measurement_index']
dfi = dfi[dfi.model_id == model_id]
dfi.field_type = 58
if len(dfi) != 0:
dfv = self.tables['measurement_values']
dfv = dfv[dfv.model_id == model_id]
for id, measurement in dfi.iterrows():
data = dfv[dfv.measurement_id == measurement.measurement_id]
dsets={'type': measurement['field_type'],
'func_type': measurement['func_type'],
'data': data['amp'].values.astype('complex'),
'x': data['frq'].values,
'rsp_node': measurement['rsp_node'],
'rsp_dir': measurement['rsp_dir'],
'ref_node': measurement['ref_node'],
'ref_dir': measurement['ref_dir'],
'rsp_ent_name':model_name, 'ref_ent_name':model_name}
# TODO: Make rsp_ent_name and ref_ent_name fields in measurement_index table.
if pd.isnull(measurement['abscissa_spec_data_type']):
dsets['abscissa_spec_data_type'] = 0
else:
dsets['abscissa_spec_data_type'] = measurement['abscissa_spec_data_type']
if pd.isnull(measurement['ordinate_spec_data_type']):
dsets['ordinate_spec_data_type'] = 0
else:
dsets['ordinate_spec_data_type'] = measurement['ordinate_spec_data_type']
if pd.isnull(measurement['orddenom_spec_data_type']):
dsets['orddenom_spec_data_type'] = 0
else:
dsets['orddenom_spec_data_type'] = measurement['orddenom_spec_data_type']
if separate_files_flag:
uffwrite=pyuff.UFF(os.path.join(export_folder, '{0}_{1:.0f}_measurements.uff'.format(model_name, model_id)))
uffwrite._write_set(dsets, mode='add')
def export_to_csv(self, fname, model_ids=[], data_types=[]):
"""Export data to uff."""
if len(model_ids) == 0:
model_ids = self.tables['info'].model_id.unique()
if len(data_types) == 0:
data_types = ['nodes', 'lines', 'elements', 'measurements', 'analyses']
if len(model_ids) == 0:
print('Warning: Empty tables. (No model_ids found).')
return False
t = datetime.now()
folder_timestamp = 'OpenModal Export CSV -- {:%Y %d-%m %H-%M-%S}'.format(t)
export_folder = os.path.join(fname, folder_timestamp)
try:
os.mkdir(export_folder)
except:
print('Warning: File exists. Try again later ...')
return False
for model_id in model_ids:
# -- Write info.
dfi = self.tables['info']
dfi = dfi[dfi.model_id == model_id]
model_name = '{0}_{1:.0f}'.format(dfi.model_name.values[0], model_id)
model_dir = os.path.join(export_folder, model_name)
os.mkdir(model_dir)
df_ = self.tables['info']
df_[df_.model_id == model_id].to_csv(os.path.join(model_dir, 'info.csv'))
if 'nodes' in data_types:
df_ = self.tables['geometry']
df_[df_.model_id == model_id].to_csv(os.path.join(model_dir, 'geometry.csv'))
# -- Special treatment for measurements
if 'measurements' in data_types:
measurements_dir = os.path.join(model_dir, 'measurements')
os.mkdir(measurements_dir)
df_ = self.tables['measurement_index']
df_[df_.model_id == model_id].to_csv(os.path.join(measurements_dir, 'measurements_index.csv'))
df_ = self.tables['measurement_values']
grouped_measurements = df_[df_.model_id == model_id].groupby('measurement_id')
for id, measurement in grouped_measurements:
measurement['amp_real'] = measurement.amp.real
measurement['amp_imag'] = measurement.amp.imag
measurement[['frq', 'amp_real', 'amp_imag']].to_csv(os.path.join(measurements_dir,
'measurement_{0:.0f}.csv'.format(id)),
index=False)
class ModalDataUff(object):
'''
Reads the uff file and populates the following pandas tables:
-- ModalData.measurement_index : index of all measurements from field 58
-- ModalData.geometry : index of all points with CS from fields 2411 and 15
-- ModalData.info : info about measurements
    Based on the position of the field in the uff file, uffid is assigned to each field in the following
    manner: first field, uffid = 0, second field, uffid = 1 and so on. Columns are named based on keys
    from the UFF class if possible. Fields uffid and field_type (type of field, e.g. 58) are added.
Geometry table combines nodes and their respective CSs, column names are altered.
'''
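    # A minimal usage sketch (the file path is hypothetical); it assumes the uff file
    # holds at least one 151/164 field and some field 58 measurements:
    #   uff_data = ModalDataUff('measurements/beam.uff')
    #   uff_data.tables['measurement_index'].head()
    #   uff_data.tables['geometry'].head()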
def __init__(self, fname='../../unvread/data/shield.uff', base_key=0):
'''
Constructor
'''
self.uff_object = pyuff.UFF(fname)
# Start above base_key.
self.base_key = base_key
self.uff_types = self.uff_object.get_set_types()
# print(self.uff_types)
# Models
self.models = dict()
# Tables
self.tables = dict()
# Coordinate-system tables
self.localcs = pd.DataFrame(columns=['model_id', 'uffidcs', 'node_nums', 'x1', 'x2', 'x3',
'y1', 'y2', 'y3',
'z1', 'z2', 'z3'])
self.localeul = pd.DataFrame(columns=['model_id', 'uffidcs', 'node_nums', 'thx', 'thy', 'thz'])
# File structure.
self.file_structure = ['%5d %-10s' % (field, types[field]) for field in self.uff_types]
self.create_model()
def create_model(self):
"""Scans the uff file and creates a model from
geometries and data, which is then populated. The models
are grouped based on the field 151!"""
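        # Illustrative sketch of the grouping below (values are made up): with
        # self.uff_types == [151, 58, 58, 151, 58], mnums == [0, 3] and therefore
        # self.models == {0: range(0, 3), 1: range(3, 5)}.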
# -- Scan geometries, each geometry is one model.
mnums = list(np.nonzero(self.uff_types==151)[0])
if len(mnums) == 0:
mnums = list(np.nonzero(self.uff_types==164)[0])
# -- What if there is no geometry? Only one model then I guess ...
if len(mnums) == 0:
print('Warning: There is no INFO or UNITS field!')
self.models[0] = range(len(self.uff_types))
# .. TODO: You have to pass this warning on.
else:
# .. Define intervals, by sequential order, for each model.
for model_id, num in enumerate(mnums):
if model_id == (len(mnums)-1):
self.models[model_id] = range(num, len(self.uff_types))
else:
                    # .. Non-last models end where the next model starts; the last one (handled above) runs to the end.
self.models[model_id] = range(num, mnums[model_id+1])
for model_id, model in self.models.items():
self.populate_model(model_id+self.base_key, model)
# print(self.models)
# print(self.uff_types)
def populate_model(self, model_id, model):
"""Read all data for each model."""
model = list(model)
self.gen_measurement_table(model_id, model)
self.gen_geometry_table(model_id, model)
self.gen_analysis_table(model_id, model)
self.gen_lines_table(model_id, model)
self.gen_info_table(model_id, model)
# .. TODO: Here is the place to check for connections between
# fields, other than by sequential order. Check if LMS
# writes anything. (It does not!)
def gen_measurement_table(self, model_id, model):
"""Read measurements."""
mnums = np.nonzero(self.uff_types[model] == 58)[0]
mnums += model[0]
if len(mnums) == 0:
return False
mlist = []
#dlist = pd.DataFrame()
# .. Create field list.
sdata = self.uff_object.read_sets(mnums[0])
fields = ['model_id', 'measurement_id', 'uffid', 'field_type']
fields.extend([key for key in sdata.keys() if not ('x' in key or 'data' in key)])
concat_list = []
for mnum in list(mnums):
dlist_ = pd.DataFrame()
sdata = self.uff_object.read_sets(mnum)
# .. Setup a new line in measurement index table.
line = [model_id, mnum, mnum, 58]
line.extend([sdata[key] for key in fields if not ('uffid' in key or 'field_type' in key or 'model_id' in key or 'measurement_id' in key)])
mlist.append(line)
            # TODO: Sort out support for complex values here. It is not quite right yet!
dlist_['frq'] = sdata['x']
dlist_['amp'] = sdata['data']
dlist_['amp'] = dlist_['amp'].astype('complex')
dlist_['amp'] = sdata['data']
dlist_['uffid'] = mnum
dlist_['measurement_id'] = mnum
dlist_['model_id'] = model_id
concat_list.append(dlist_)
dlist = pd.concat(concat_list, ignore_index=True)
concat_list = []
if 'measurement_index' in self.tables:
self.tables['measurement_index'] = pd.concat([self.tables['measurement_index'], pd.DataFrame(mlist, columns=fields)], ignore_index=True)
self.tables['measurement_values'] = pd.concat([self.tables['measurement_values'], dlist], ignore_index=True)
else:
self.tables['measurement_index'] = | pd.DataFrame(mlist, columns=fields) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 23 10:36:23 2022
@author: lawashburn
"""
import os
import csv
import pandas as pd
import numpy as np
from datetime import datetime
now = datetime.now()
spectra_import = input('Enter path to .txt extract file from step 1: ')
working_directory = input('Enter path to working directory: ')
data_type = input('Enter tissue type: ')
trial = input('Enter trial number: ')
error_marg = input('Enter MS1 ppm error cutoff: ')
intensity = input('Enter intensity cutoff: ')
scan_cut = input('Enter minimum scan # cutoff: ')
#spectra_import = r"C:\Users\lawashburn\Documents\HyPep1.0\HyPep_Simple_ASMS_Results\Raw_Files\Formatted_MS2\PO_3_ms2_output_list.txt"#path to spectra after RawConverter
#working_directory = r"C:\Users\lawashburn\Documents\Nhu_Prescursor_Matching\filtered_lists"
#data_type = 'PO_'
#trial = '3_'
#error_marg = 10 #+/- ppm
trial = str(trial)
#formats spectra import values
spectra_import = pd.read_csv(spectra_import, sep=" ",skiprows=[0], names= ["m/z", "resolution", "charge", "intensity","MS2",'scan_number','empty'])
#spectra_import.columns = ["m/z", "resolution", "charge", "intensity","MS2",'scan_number','empty']
spectra_import = spectra_import.apply(pd.to_numeric)
spectra_value = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
                # if we are in fails, then it's ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
| tm.assert_frame_equal(df2, expected) | pandas.util.testing.assert_frame_equal |
import pytest
import unittest
from unittest import mock
from ops.tasks.anomalyDetection import anomalyService
from anomaly.models import Anomaly
from pandas import Timestamp
from decimal import Decimal
from mixer.backend.django import mixer
import pandas as pd
@pytest.mark.django_db(transaction=True)
def test_createAnomalyService(client, mocker):
fakedata = [{'ds': | Timestamp('2021-06-01 00:00:00+0000', tz='UTC') | pandas.Timestamp |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
from scipy.special import gamma,gammainc,gammaincc
from scipy.stats import norm
from scipy.optimize import minimize,root_scalar
import networkx as nx
from operator import itemgetter
ep = 1e-80 #For preventing overflow errors in norm.cdf
tref = pd.to_datetime('2020-01-01') #Reference time for converting dates to numbers
################# FORMATTING ########################
def format_JH(url,drop_list,columns):
data = pd.read_csv(url)
if len(columns) == 2:
data[columns[1]] = data[columns[1]].fillna(value='NaN')
data = data.T.drop(drop_list).T.set_index(columns).T
data.index = pd.to_datetime(data.index,format='%m/%d/%y')
return data
def format_kaggle(folder,metric):
data_full = pd.read_csv(folder+'train.csv')
data = data_full.pivot_table(index='Date',columns=['Country_Region','Province_State'],values=metric)
data.index = pd.to_datetime(data.index,format='%Y-%m-%d')
return data
def format_predictions(path):
pred = pd.read_csv(path).fillna(value='NaN').set_index(['Country/Region','Province/State'])
for item in ['Nmax','Nmax_low','Nmax_high','sigma','sigma_low','sigma_high']:
pred[item] = pd.to_numeric(pred[item])
for item in ['th','th_low','th_high']:
pred[item] = pd.to_datetime(pred[item],format='%Y-%m-%d')
return pred
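# Hypothetical usage of the formatters above (paths and metric name are assumptions):
#   confirmed = format_kaggle('data/kaggle/', 'ConfirmedCases')
#   pred = format_predictions('predictions/param_fits.csv')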
def load_sim(path):
data = pd.read_csv(path,index_col=0,header=[0,1])
data.index = | pd.to_datetime(data.index,format='%Y-%m-%d') | pandas.to_datetime |
#########################################################
### DNA variant annotation tool
### Version 1.0.0
### By <NAME>
### <EMAIL>
#########################################################
import pandas as pd
import numpy as np
import allel
import argparse
import subprocess
import sys
import os.path
import pickle
import requests
import json
def extract_most_deleterious_anno(row, num_ann_max):
ann_order = pd.read_csv(anno_order_file, sep=' ')
alt = row[:num_ann_max]
anno = row[num_ann_max:]
alt.index = range(0, len(alt))
anno.index = range(0, len(anno))
ann_all_alt = pd.DataFrame()
alt_unique = alt.unique()
for unique_alt in alt_unique:
if unique_alt != '':
anno_all = anno[alt == unique_alt]
ann_order_all = pd.DataFrame()
for ann_any in anno_all:
if sum(ann_any == ann_order.Anno) > 0:
ann_any_order = ann_order[ann_order.Anno == ann_any]
else:
ann_any_order = ann_order.iloc[ann_order.shape[0]-1]
ann_order_all = ann_order_all.append(ann_any_order)
small_ann = ann_order_all.sort_index(ascending=True).Anno.iloc[0]
ann_unique_alt = [unique_alt, small_ann]
ann_all_alt = ann_all_alt.append(ann_unique_alt)
ann_all_alt.index = range(0, ann_all_alt.shape[0])
return ann_all_alt.T
def run_snpeff(temp_out_name):
snpeff_command = ['java', '-Xmx4g', '-jar', snpeff_path, \
'-ud', '0', \
# '-v', \
'-canon', '-noStats', \
ref_genome, vcf_file]
temp_output = open(temp_out_name, 'w')
subprocess.run(snpeff_command, stdout=temp_output)
temp_output.close()
def get_max_num_ann(temp_out_name):
num_ann_guess = 500
callset = allel.vcf_to_dataframe(temp_out_name, fields='ANN', numbers={'ANN': num_ann_guess})
num_ann = callset.apply(lambda x: sum(x != ''), axis=1)
num_ann_max = num_ann.max() # num_ann_max = 175
return num_ann_max
def get_ann_from_output_snpeff(temp_out_name):
callset = allel.read_vcf(temp_out_name, fields='ANN', transformers=allel.ANNTransformer(), \
numbers={'ANN': num_ann_max})
df1 = pd.DataFrame(data=callset['variants/ANN_Allele'])
df2 = pd.DataFrame(data=callset['variants/ANN_Annotation'])
df3 = pd.concat((df1, df2), axis=1)
df3.columns = range(0, df3.shape[1])
return df3
def get_anno_total(anno_from_snpeff):
anno_total = pd.DataFrame()
pickle_dump = 'pickle_dump.temp'
if not os.path.isfile(pickle_dump):
print('Extracting most deleterious annotations generated by SnpEff')
for index, row in anno_from_snpeff.iterrows():
anno_row = extract_most_deleterious_anno(row, num_ann_max)
anno_total = anno_total.append(anno_row)
print('done')
dump_file = open(pickle_dump, 'wb')
pickle.dump(anno_total, dump_file, pickle.HIGHEST_PROTOCOL)
dump_file.close()
dump_file = open(pickle_dump, 'rb')
anno_total = pickle.load(dump_file)
a = ['Alt_' + str(i) for i in range(1, num_alt + 1)]
b = ['Anno_' + str(i) for i in range(1, num_alt + 1)]
c = list(range(0, num_alt * 2))
c[::2] = a
c[1::2] = b
anno_total.columns = c
anno_total.replace(np.nan, -1, inplace=True)
anno_total.index = range(0, anno_total.shape[0])
return anno_total
def get_num_alternate(vcf_file):
num_alt = allel.read_vcf(vcf_file, fields='numalt')['variants/numalt'].max()
return num_alt
def get_dp_ro_ao(temp_out_name):
callset_dp_ro_ao = allel.vcf_to_dataframe(temp_out_name, fields=['DP', 'RO', 'AO'], alt_number=num_alt)
callset_dp_ro_ao.index = range(0, callset_dp_ro_ao.shape[0])
return callset_dp_ro_ao
def get_alt_ref_ratio(callset_dp_ro_ao):
callset_ratio = pd.DataFrame()
for i in range(0, num_alt):
# print('run ratio: ', i)
callset_ratio[i] = callset_dp_ro_ao.apply(lambda x: x[i + 2] / x[1], axis=1)
# print('run ratio: ', i, ' done')
# print('callset_ratio is done')
callset_ratio.columns = ['RatioAR_Alt_' + str(i) for i in range(1, num_alt + 1)]
callset_ratio.index = range(0, callset_ratio.shape[0])
return callset_ratio
def combine_anno_and_callset(anno_total, callset_dp_ro_ao, callset_ratio, ExAC_variant_af, ExAC_variant_ordered_csqs):
anno_and_callset = pd.concat([anno_total, callset_dp_ro_ao, callset_ratio, ExAC_variant_af, ExAC_variant_ordered_csqs], axis=1)
return anno_and_callset
def combine_with_comma(row):
a = []
for i in range(0, len(row)):
if row.iloc[i][0] != '-':
a.append(True)
else:
a.append(False)
b = ",".join(row[a])
return b
def get_anno_good(anno_and_callset):
anno_columns = pd.DataFrame()
for i in range(1, num_alt + 1):
Alt_i = 'Alt_' + str(i)
Anno_i = 'Anno_' + str(i)
AO_i = 'AO_' + str(i)
RatioAR_Alt_i = 'RatioAR_Alt_' + str(i)
exac_var_af = 'exac_' + search_af + "_" + str(i)
exac_ordered_csqs = 'exac_' + search_ordered_csqs + '_' + str(i)
column_i = anno_and_callset[[Alt_i, Anno_i, 'DP', 'RO', AO_i, RatioAR_Alt_i, exac_var_af, exac_ordered_csqs]].apply(lambda x: '|'.join(x.map(str)), axis=1)
anno_columns = pd.concat([anno_columns, column_i], axis=1)
anno_one_column = anno_columns.apply(combine_with_comma, axis=1)
anno_good = ["ANN="] * len(anno_one_column) + anno_one_column
return anno_good
def get_num_lines_header(contents):
lines_header = 0
for i in range(0, len(contents)):
if contents[i][0] == '#' and contents[i + 1][0] != '#':
# print(contents[i])
# print(i)
lines_header = i # lines_header 142
return lines_header
def generate_output_vcf(vcf_file, anno_good):
input_vcf = pd.read_csv(vcf_file, sep='\t', skiprows=lines_header)
anno_good_all = input_vcf.INFO + ';' + anno_good
input_vcf.INFO = anno_good_all
output_vcf = input_vcf.copy()
return output_vcf
def generate_header(contents):
header = contents[0:lines_header]
header_add1 = """##SimpleAnnotation Version="0.0.1" By <NAME> <EMAIL> \n"""
header_add2 = """##SimpleAnnotation Cmd="python3 SimpleAnnotation.py -input {} -snpeff {} -genome {} "\n""".format(vcf_file, snpeff_path, ref_genome)
header_add3 = """##INFO=<ID=ANN,Number=.,Type=String, Description="Simple annotations: 'Alternate allele | Type of variation most deleterious | Sequence depth at the site of variation | Number of reads of reference | Number of reads of alternate | Ratio of read counts of alt vs ref | ExAC variant Allele Frequency | ExAC variant consequence most deleterious' ">\n"""
header.append(header_add1)
header.append(header_add2)
header.append(header_add3)
return header
def search_REST_ExAC(row, search_type):
row_var = [-1] * len(row)
url_1 = 'http://exac.hms.harvard.edu/rest/variant/{}/'.format(search_type)
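    # Variant ids are built as CHROM-POS-REF-ALT (see generate_var_id_for_exac), so a
    # request URL looks roughly like (hypothetical variant): .../rest/variant/variant/1-13372-G-C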
for i in range(0, len(row)):
if row.iloc[i][-1] != '-':
url = url_1 + row.iloc[i]
my_response = requests.get(url)
if my_response.ok:
j_data = json.loads(my_response.content)
if search_type == search_af:
if 'allele_freq' in j_data.keys():
row_var[i] = j_data['allele_freq']
else:
row_var[i] = 'Not_found'
elif search_type == search_ordered_csqs:
if j_data != None and len(j_data) > 1:
row_var[i] = j_data[1]
else:
row_var[i] = 'Not_found'
else:
row_var[i] = 'Not_found'
return row_var
def ExAC_search_variant(var_all, search_type):
exac = pd.DataFrame()
counter = 0
print('There are {} variants that need to be searched. This will take a while.'.format(var_all.shape[0]))
for index, row in var_all.iterrows():
af_row = search_REST_ExAC(row, search_type)
exac = pd.concat([exac, pd.DataFrame(af_row)], axis=1)
counter += 1
if counter%500 == 0:
print(counter)
exac = exac.T
exac.index = range(0, exac.shape[0])
exac.columns = ['exac_' + search_type + '_' + str(i) for i in range(1, num_alt + 1)]
return exac
def generate_var_id_for_exac(vcf_file):
callset = allel.vcf_to_dataframe(vcf_file, fields=['CHROM', 'POS', 'REF', 'ALT'], alt_number=num_alt)
var_all = pd.DataFrame()
for i in range(1, num_alt+1):
ALT_i = 'ALT_' + str(i)
var_i = callset[['CHROM', 'POS', 'REF', ALT_i]].apply(lambda x: "-".join(x.map(str)), axis=1)
var_all = | pd.concat([var_all, var_i], axis=1) | pandas.concat |
'''
manipulation (:mod:`calour.manipulation`)
=========================================
.. currentmodule:: calour.manipulation
Functions
^^^^^^^^^
.. autosummary::
:toctree: generated
join_metadata_fields
join_experiments
join_experiments_featurewise
aggregate_by_metadata
'''
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from copy import deepcopy
from logging import getLogger
from collections import defaultdict
import pandas as pd
import numpy as np
from .experiment import Experiment
from .util import join_fields
logger = getLogger(__name__)
def chain(exp: Experiment, steps=[], inplace=False, **kwargs) -> Experiment:
'''Perform multiple operations sequentially.
Parameters
----------
steps : list of callables
Each callable is a class method that has a boolean
parameter of ``inplace``, and returns an
:class:`.Experiment` object.
inplace : bool, default=False
change occurs in place or not.
kwargs : dict
keyword arguments to pass to each class method. The dict
key should be in the form of
"<method_name>__<param_name>". For example,
"exp.chain(steps=[filter_samples, log_n], log_n__n=3)"
will call :func:`filter_samples` first and then
:func:`log_n` while setting its parameter `n=3`.
Returns
-------
Experiment
'''
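    # Usage sketch, mirroring the docstring example above (the step names are assumptions):
    #   new_exp = exp.chain(steps=[filter_samples, log_n], log_n__n=3)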
exp = exp if inplace else deepcopy(exp)
params = defaultdict(dict)
for k, v in kwargs.items():
transformer, param_name = k.split('__')
if param_name == 'inplace':
raise ValueError(
'You can not set `inplace` for individual transformation.')
params[transformer][param_name] = v
for step in steps:
step(exp, inplace=True, **params[step.__name__])
return exp
def join_metadata_fields(exp: Experiment, field1, field2, new_field=None,
axis='s', inplace=True, **kwargs) -> Experiment:
'''Join 2 fields in sample or feature metadata into 1.
Parameters
----------
field1 : str
Name of the first field to join. The value in this column can be any data type.
field2 : str
        Name of the second field to join. The value in this column can be any data type.
new_field : str, default=None
name of the new (joined) field. Default to name it as field1 + sep + field2
sep : str, optional
The separator between the values of the two fields when joining
kwargs : dict
Other parameters passing to :func:`join_fields`.
Returns
-------
Experiment
See Also
--------
join_fields
'''
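    # Hypothetical example: join the 'donor' and 'timepoint' sample metadata columns
    # into a single 'donor_timepoint' column (field names are illustrative):
    #   exp = join_metadata_fields(exp, 'donor', 'timepoint', 'donor_timepoint', sep='_')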
if not inplace:
exp = deepcopy(exp)
if axis == 0:
md = exp.sample_metadata
else:
md = exp.feature_metadata
join_fields(md, field1, field2, new_field, **kwargs)
return exp
def aggregate_by_metadata(exp: Experiment, field, agg='mean', axis=0, inplace=False) -> Experiment:
'''Aggregate all samples or features of the same group.
Group the samples (axis=0) or features (axis=1) that have the same
value in the column of given field and then aggregate the data
table of each group with the given method.
The number of samples/features in each group and their IDs are
stored in new metadata columns '_calour_merge_number' and
'_calour_merge_ids', respectively. For other metadata, the first
one in the metadata table in each group is kept in the final
returned experiment object.
.. warning:: It will convert the ``Experiment.data`` from the
sparse matrix to dense array.
Parameters
----------
field : str
The sample/feature metadata field to group samples/features
agg : str, optional
aggregate method. Choice includes:
* 'mean' : the mean of the group
* 'median' : the median of the group
* 'sum' : the sum of of the group
axis : 0, 1, 's', or 'f', optional
0 or 's' (default) to aggregate samples; 1 or 'f' to aggregate features
inplace : bool, optional
False (default) to create new Experiment, True to perform inplace
Returns
-------
Experiment
'''
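    # Hypothetical example: average all samples sharing the same 'subject' value,
    # producing one aggregated sample per subject:
    #   merged = aggregate_by_metadata(exp, 'subject', agg='mean', axis=0)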
logger.debug('Merge data using field %s, agg %s' % (field, agg))
if not inplace:
exp = deepcopy(exp)
if axis == 0:
col = exp.sample_metadata[field]
else:
col = exp.feature_metadata[field]
# convert to dense for efficient slicing
exp.sparse = False
uniq = col.unique()
n = len(uniq)
keep_pos = np.empty(n, dtype=np.uint32)
merge_number = np.empty(n, dtype=np.uint32)
# use object as dtype for string
merge_ids = np.empty(n, dtype=object)
for i, val in enumerate(uniq):
if | pd.isnull(val) | pandas.isnull |
#!/usr/bin/env python
import multiprocessing
import pandas as pd
import sys
import os
from io import StringIO
import re
import Bio
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
file = sys.argv[1]
threads=int(sys.argv[2])
def clean_pep(val):
regex = re.compile('[^a-zA-Z]')
rval = regex.sub('', val)
return rval
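# e.g. clean_pep('M.AS+15.995DF.K') returns 'MASDFK' (all non-letter characters are stripped)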
def process_res(val):
r = val
r = r.split('\n')
header = r[0]
    exclude = ['# No solutions found.',
               '# too few peaks...',
               '# Could not process spectrum...']
if header != '':
cols = r[1]
if cols not in exclude:
t = StringIO('\n'.join(r[1:]))
table = | pd.read_csv(t, sep='\t') | pandas.read_csv |
# author: <NAME>, <NAME>, <NAME>, <NAME>
# date: 2020-06-12
'''This script reads ministries' comments data from the interim directory and the predicted
labels of question 1 from the interim directory, joins both datasets, and saves the result in the specified directory.
There are 2 parameters, the input directory path and the output directory path where the merged data is written.
Usage: merge_ministry_pred.py --input_dir=<input_dir_path> --output_dir=<destination_dir_path>
Example:
python src/data/merge_ministry_pred.py --input_dir=data/ --output_dir=data/interim/
Options:
--input_dir=<input_dir_path> Location of data Directory
--output_dir=<destination_dir_path> Directory for saving ministries files
'''
import numpy as np
import pandas as pd
import os
from docopt import docopt
opt = docopt(__doc__)
def main(input_dir, output_dir):
assert os.path.exists(input_dir), "The path entered for input_dir does not exist. Make sure to enter correct path \n"
assert os.path.exists(output_dir), "The path entered for output_dir does not exist. Make sure to enter correct path \n"
print("\n--- START: merge_ministry_pred.py ---")
### Question 1 - Predictions on 2015 dataset ###
# Ministries data
print("Loading Q1 ministries' data and predictions into memory.")
# QUAN 2015
ministries_q1 = pd.read_excel(input_dir + "/interim/question1_models/advance/ministries_Q1.xlsx")
ministries_2015 = pd.read_excel(input_dir + "/interim/question1_models/advance/ministries_2015.xlsx")
pred_2015 = np.load(input_dir + "/output/theme_predictions/theme_question1_2015.npy")
assert len(ministries_q1) > 0, 'no records in ministries_q1.xlsx'
assert len(ministries_2015) > 0, 'no records in ministries_2015.xlsx'
columns_basic = ['Telkey', 'Comment', 'Year', 'Ministry', 'Ministry_id']
columns_labels = ['CPD', 'CB', 'EWC', 'Exec', 'FEW', 'SP', 'RE', 'Sup',
'SW', 'TEPE', 'VMG', 'OTH']
columns_order = columns_basic + columns_labels
pred_2015 = | pd.DataFrame(pred_2015, columns=columns_labels) | pandas.DataFrame |
import config as cf
import pandas as pd
import os
import sys
import networkx as nx
from models import models_game_elasticity
assert cf
import argparse
parser = argparse.ArgumentParser(description='Network simulations.')
parser.add_argument('--network_type', type=str, default='scale_free',
                    help='Network type for loading and storing...')
parser.add_argument('--network_name', type=str, default='scale_free_1000',
help='Network name for loading and storing...')
parser.add_argument('--game_strategy', type=str, default='defect',
help='Fixed strategy..')
parser.add_argument('--fixed_fraction', type=float, default=0.3,
help='Fixed fraction..')
parser.add_argument('--num_nodes', type=int, default=1000,
help='Number of nodes for specific network...')
parser.add_argument('--type_sim', default='global',type=str,
help='For running local or global simulation')
parser.add_argument('--n_iters', default=20,type=int,
help='Number of iterations')
parser.add_argument('--max_time', default=150,type=int,
help='Number of days of simulation')
args = parser.parse_args()
main_path = os.path.split(os.getcwd())[0] + '/Epidemiology_behavior_dynamics'
config_path = main_path + '/config.csv'
config_data = pd.read_csv(config_path, sep=',', header=None, index_col=0)
networks_path = config_data.loc['networks_dir'][1]
results_path = os.path.join(config_data.loc['results_dir'][1], 'elasticity_test')
awareness_path = config_data.loc['sigma_search_dir'][1]
infection_prob_path = config_data.loc['beta_search_dir'][1]
num_nodes = args.num_nodes
sigma_search = pd.read_csv(awareness_path, dtype={'key':str, 'value':float})
beta_search = pd.read_csv(infection_prob_path, dtype={'key':str, 'value':float})
G = nx.read_gpickle( os.path.join(main_path, networks_path, str(num_nodes), args.network_name) )
df = pd.concat([sigma_search, beta_search], axis=1)
df_param_run = | pd.DataFrame(columns=['beta_key', 'sigma_key', 'beta_val', 'sigma_val']) | pandas.DataFrame |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Index,
Interval,
IntervalIndex,
Timedelta,
Timestamp,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
params=[
(Index([0, 2, 4]), Index([1, 3, 5])),
(Index([0.0, 1.0, 2.0]), Index([1.0, 2.0, 3.0])),
(timedelta_range("0 days", periods=3), timedelta_range("1 day", periods=3)),
(date_range("20170101", periods=3), date_range("20170102", periods=3)),
(
date_range("20170101", periods=3, tz="US/Eastern"),
date_range("20170102", periods=3, tz="US/Eastern"),
),
],
ids=lambda x: str(x[0].dtype),
)
def left_right_dtypes(request):
"""
Fixture for building an IntervalArray from various dtypes
"""
return request.param
class TestAttributes:
@pytest.mark.parametrize(
"left, right",
[
(0, 1),
(Timedelta("0 days"), Timedelta("1 day")),
(Timestamp("2018-01-01"), Timestamp("2018-01-02")),
(
Timestamp("2018-01-01", tz="US/Eastern"),
Timestamp("2018-01-02", tz="US/Eastern"),
),
],
)
@pytest.mark.parametrize("constructor", [IntervalArray, IntervalIndex])
def test_is_empty(self, constructor, left, right, closed):
# GH27219
tuples = [(left, left), (left, right), np.nan]
expected = np.array([closed != "both", False, False])
result = constructor.from_tuples(tuples, closed=closed).is_empty
tm.assert_numpy_array_equal(result, expected)
class TestMethods:
@pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"])
def test_set_closed(self, closed, new_closed):
# GH 21670
array = IntervalArray.from_breaks(range(10), closed=closed)
result = array.set_closed(new_closed)
expected = IntervalArray.from_breaks(range(10), closed=new_closed)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
Interval(0, 1, closed="right"),
IntervalArray.from_breaks([1, 2, 3, 4], closed="right"),
],
)
def test_where_raises(self, other):
ser = pd.Series(IntervalArray.from_breaks([1, 2, 3, 4], closed="left"))
match = "'value.closed' is 'right', expected 'left'."
with pytest.raises(ValueError, match=match):
ser.where([True, False, True], other=other)
def test_shift(self):
# https://github.com/pandas-dev/pandas/issues/31495
a = IntervalArray.from_breaks([1, 2, 3])
result = a.shift()
# int -> float
expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)])
tm.assert_interval_array_equal(result, expected)
def test_shift_datetime(self):
a = IntervalArray.from_breaks(date_range("2000", periods=4))
result = a.shift(2)
expected = a.take([-1, -1, 0], allow_fill=True)
tm.assert_interval_array_equal(result, expected)
result = a.shift(-1)
expected = a.take([1, 2, -1], allow_fill=True)
tm.assert_interval_array_equal(result, expected)
class TestSetitem:
def test_set_na(self, left_right_dtypes):
left, right = left_right_dtypes
result = IntervalArray.from_arrays(left, right)
if result.dtype.subtype.kind not in ["m", "M"]:
msg = "'value' should be an interval type, got <.*NaTType'> instead."
with pytest.raises(TypeError, match=msg):
result[0] = pd.NaT
if result.dtype.subtype.kind in ["i", "u"]:
msg = "Cannot set float NaN to integer-backed IntervalArray"
with pytest.raises(ValueError, match=msg):
result[0] = np.NaN
return
result[0] = np.nan
expected_left = Index([left._na_value] + list(left[1:]))
expected_right = Index([right._na_value] + list(right[1:]))
expected = IntervalArray.from_arrays(expected_left, expected_right)
tm.assert_extension_array_equal(result, expected)
def test_setitem_mismatched_closed(self):
arr = IntervalArray.from_breaks(range(4))
orig = arr.copy()
other = arr.set_closed("both")
msg = "'value.closed' is 'both', expected 'right'"
with pytest.raises(ValueError, match=msg):
arr[0] = other[0]
with pytest.raises(ValueError, match=msg):
arr[:1] = other[:1]
with pytest.raises(ValueError, match=msg):
arr[:0] = other[:0]
with pytest.raises(ValueError, match=msg):
arr[:] = other[::-1]
with pytest.raises(ValueError, match=msg):
arr[:] = list(other[::-1])
with pytest.raises(ValueError, match=msg):
arr[:] = other[::-1].astype(object)
with pytest.raises(ValueError, match=msg):
arr[:] = other[::-1].astype("category")
# empty list should be no-op
arr[:0] = []
tm.assert_interval_array_equal(arr, orig)
def test_repr():
# GH 25022
arr = IntervalArray.from_tuples([(0, 1), (1, 2)])
result = repr(arr)
expected = (
"<IntervalArray>\n"
"[(0, 1], (1, 2]]\n"
"Length: 2, dtype: interval[int64, right]"
)
assert result == expected
# ----------------------------------------------------------------------------
# Arrow interaction
pyarrow_skip = td.skip_if_no("pyarrow", min_version="0.16.0")
@pyarrow_skip
def test_arrow_extension_type():
import pyarrow as pa
from pandas.core.arrays._arrow_utils import ArrowIntervalType
p1 = ArrowIntervalType(pa.int64(), "left")
p2 = ArrowIntervalType(pa.int64(), "left")
p3 = ArrowIntervalType(pa.int64(), "right")
assert p1.closed == "left"
assert p1 == p2
assert not p1 == p3
assert hash(p1) == hash(p2)
assert not hash(p1) == hash(p3)
@pyarrow_skip
def test_arrow_array():
import pyarrow as pa
from pandas.core.arrays._arrow_utils import ArrowIntervalType
intervals = pd.interval_range(1, 5, freq=1).array
result = pa.array(intervals)
assert isinstance(result.type, ArrowIntervalType)
assert result.type.closed == intervals.closed
assert result.type.subtype == pa.int64()
assert result.storage.field("left").equals(pa.array([1, 2, 3, 4], type="int64"))
assert result.storage.field("right").equals(pa.array([2, 3, 4, 5], type="int64"))
expected = pa.array([{"left": i, "right": i + 1} for i in range(1, 5)])
assert result.storage.equals(expected)
# convert to its storage type
result = pa.array(intervals, type=expected.type)
assert result.equals(expected)
# unsupported conversions
with pytest.raises(TypeError, match="Not supported to convert IntervalArray"):
pa.array(intervals, type="float64")
with pytest.raises(TypeError, match="different 'subtype'"):
pa.array(intervals, type=ArrowIntervalType(pa.float64(), "left"))
@pyarrow_skip
def test_arrow_array_missing():
import pyarrow as pa
from pandas.core.arrays._arrow_utils import ArrowIntervalType
arr = IntervalArray.from_breaks([0.0, 1.0, 2.0, 3.0])
arr[1] = None
result = pa.array(arr)
assert isinstance(result.type, ArrowIntervalType)
assert result.type.closed == arr.closed
assert result.type.subtype == pa.float64()
# fields have missing values (not NaN)
left = pa.array([0.0, None, 2.0], type="float64")
right = pa.array([1.0, None, 3.0], type="float64")
assert result.storage.field("left").equals(left)
assert result.storage.field("right").equals(right)
# structarray itself also has missing values on the array level
vals = [
{"left": 0.0, "right": 1.0},
{"left": None, "right": None},
{"left": 2.0, "right": 3.0},
]
expected = pa.StructArray.from_pandas(vals, mask=np.array([False, True, False]))
assert result.storage.equals(expected)
@pyarrow_skip
@pytest.mark.parametrize(
"breaks",
[[0.0, 1.0, 2.0, 3.0], | date_range("2017", periods=4, freq="D") | pandas.date_range |
import os
import cv2
import pandas as pd
import torch
import data_loader as dl
from models.model_zoo import get_model
from train import get_empty_scores_dict
from transformations import transforms as trfs
from utils.utilities import parse_args, parse_yaml
def predict_data_set(
model,
data_loader,
device,
cpu_device,
output_path
):
# Don't need to keep track of gradients
with torch.no_grad():
if model.training:
# Set to evaluation mode (BatchNorm and Dropout works differently)
model.eval()
# Validation loop
for ii, (images, targets, image_ids) in enumerate(data_loader):
# Tensors to device
images = list(image.to(device) for image in images)
outputs = model(images)
outputs = [
{
k: v.to(cpu_device).numpy() for k, v in t.items()
} for t in outputs
]
# TODO: Add NMS
for idx, (image, image_id) in enumerate(zip(images, image_ids)):
preds = outputs[idx]['boxes']
image_vis = cv2.imread(os.path.join(os.getcwd(), 'datasets', 'train', 'Positive', image_id))
for row in preds:
c1 = int(row[0]), int(row[1])
c2 = int(row[2]), int(row[3])
cv2.rectangle(image_vis, c1, c2, (0, 0, 255), 3)
cv2.imwrite(os.path.join(output_path, image_id), image_vis)
def predict_model(
valid_data_loader,
model,
path_save_model,
output_path
):
"""
"""
scores_dict_valid = get_empty_scores_dict(valid_data_loader)
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
cpu_device = torch.device('cpu')
model.to(device)
model.load_state_dict(torch.load(path_save_model, map_location=torch.device('cpu')))
predict_data_set(
model, valid_data_loader, device,
cpu_device,
output_path
)
df_scores_valid = | pd.DataFrame(scores_dict_valid) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
        expected = 0.0 > pd.Series([0, 1, 2])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
        for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint16',
                      'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series( | date_range('2013-01-01', '2013-01-03') | pandas.date_range |
import argparse
import gc
from tqdm import tqdm
import numpy as np
import os
import pandas as pd
from pathlib import Path
import pickle
import random
from scipy import stats
from sklearn.metrics import accuracy_score
from gefs.learning import LearnSPN
from gefs.trees import Tree, RandomForest
from prep import get_data, learncats, get_stats, normalize_data, standardize_data
from knn_imputer import KNNImputer
from simple_imputer import SimpleImputer
from miss_forest import MissForest
# Auxiliary functions
def str2bool(v):
""" Converts a string to a boolean value. """
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == '__main__':
# Hyperparameters
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset', '-d',
type=str,
default='wine',
)
parser.add_argument(
'--runs', '-r',
nargs='+',
type=int,
default=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
)
parser.add_argument(
'--n_folds', '-f',
type=int,
default=5,
)
parser.add_argument(
'--n_estimators', '-e',
type=int,
default=100,
)
parser.add_argument(
'--msl', '-m',
nargs='+',
type=int,
default=[1],
)
parser.add_argument(
'--lspn', '-l',
type=str2bool,
default='false',
)
FLAGS, unparsed = parser.parse_known_args()
data, ncat = get_data(FLAGS.dataset)
# min_sample_leaf is the minimum number of samples at leaves
# Define which values of min_sample_leaf to test
min_sample_leaves = FLAGS.msl
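    # Drop candidate min_samples_leaf values larger than 10% of the dataset size.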
min_sample_leaves = [msl for msl in min_sample_leaves if msl < data.shape[0]/10]
meanspath = os.path.join('missing', FLAGS.dataset + '_mf_means.csv')
cispath = os.path.join('missing', FLAGS.dataset + '_mf_cis.csv')
Path('missing').mkdir(parents=True, exist_ok=True)
if FLAGS.lspn:
methods = ['Friedman', 'Mean', 'Surr', 'KNN', 'MissForest', 'GeFp', 'GeF', 'GeFp(LSPN)', 'GeF(LSPN)']
else:
methods = ['Friedman', 'Mean', 'Surr', 'KNN', 'MissForest', 'GeFp', 'GeF']
df_all = pd.DataFrame()
np.seterr(invalid='raise')
completed_runs = 0
for run in FLAGS.runs:
print('####### DATASET: ', FLAGS.dataset, " with shape ", data.shape)
print('####### RUN: ', run)
print('####### ', FLAGS.n_folds, ' folds')
np.random.seed(run) # Easy way to reproduce the folds
folds = np.zeros(data.shape[0])
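        # Stratified fold assignment: fold ids are drawn per class label so each
        # fold keeps roughly the same class proportions as the full dataset.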
        for c in np.unique(data[:, -1]):
            nn = np.sum(data[:, -1] == c)
ind = np.tile(np.arange(FLAGS.n_folds), int(np.ceil(nn/FLAGS.n_folds)))[:nn]
folds[data[:, -1] == c] = np.random.choice(ind, nn, replace=False)
for min_samples_leaf in min_sample_leaves:
print(' min samples: ', min_samples_leaf)
for fold in range(FLAGS.n_folds):
print('####### Fold: ', fold)
train_data = data[np.where(folds!=fold)[0], :]
test_data = data[np.where(folds==fold)[0], :]
# Standardize train data only
_, maxv, minv, mean, std = get_stats(train_data, ncat)
train_data = standardize_data(train_data, mean, std)
test_data = standardize_data(test_data, mean, std)
X_train, X_test = train_data[:, :-1], test_data[:, :-1]
y_train, y_test = train_data[:, -1], test_data[:, -1]
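                # All imputers below are fit on the training split only, so the
                # test fold never leaks into the imputation models.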
imputer = SimpleImputer(ncat=ncat[:-1], method='mean').fit(X_train)
knn_imputer = KNNImputer(ncat=ncat[:-1], n_neighbors=7).fit(X_train)
cat_vars = np.where(ncat[:-1]>1)[0]
if len(cat_vars) == 0:
cat_vars = None
forest_imputer = MissForest(random_state=run).fit(X_train, cat_vars=cat_vars)
np.random.seed(run)
print(' Training')
rf = RandomForest(n_estimators=FLAGS.n_estimators, ncat=ncat, min_samples_leaf=min_samples_leaf, surrogate=True, random_state=run)
rf.fit(X_train, y_train)
print(' Converting to GeF')
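                # topc() converts the fitted forest into a probabilistic circuit
                # (a Generative Forest), which can marginalise over missing
                # features at prediction time instead of relying on imputation.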
gef = rf.topc()
gef.maxv, gef.minv = maxv, minv
if FLAGS.lspn:
print(' Converting to GeF(LSPN)')
gef_lspn = rf.topc(learnspn=30) # 30 is the number of samples required to fit LearnSPN
gef_lspn.maxv, gef_lspn.minv = maxv, minv
print(" Inference")
for i in tqdm(np.arange(0, 1., 0.1)):
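                    # (Assumption) i is the fraction of test-set features masked
                    # as missing before the methods above are compared.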
df = | pd.DataFrame() | pandas.DataFrame |
import sys
import os
import math
import pandas as pd
import numpy as np
import camoco as co
from collections import OrderedDict
from camoco.Tools import log
class simulateGWAS(object):
    def __init__(self, cob=None, go=None):
        # GWAS simulations need a COB and a Gene Ontology
self.cob = cob
self.go = go
self.method = 'density'
# Give a place to store the results
self.results = | pd.DataFrame() | pandas.DataFrame |
# content-based filtering
# ๊ธฐ์ฌ ๋ด์ฉ์ ํ ๋๋ก ํ์ฌ ์ ์ฌํ ๊ธฐ์ฌ ์ถ์ฒ(์ฌ์ฉ์์ ๋ํ ์ ๋ณด๊ฐ ์ผ๋ง ์์ ๋๋ ์ด ๋ฐฉ์์ผ๋ก ๋ด์ค๋ฅผ ๋๋ฅด๋ฉด ์ถ์ฒ์ด ๋จ๋๋ก ํ๋ค.)
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
df = pd.read_csv('../output/Article_economy_201701_201804.csv', header=None, names=['publish_year', 'catagory', 'publish', 'title', 'content', 'url'])
index_list = list(range(0, 500, 1))
df = | pd.DataFrame(df[:500], index=index_list) | pandas.DataFrame |
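# A minimal sketch of the similarity step these imports set up, built from the
# 'content' and 'title' columns loaded above. The recommend() helper and its
# topn parameter are illustrative additions, not part of the original script.
tfidf_matrix = TfidfVectorizer().fit_transform(df['content'].astype(str))
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)  # TF-IDF rows are L2-normalised, so this is cosine similarity

def recommend(idx, topn=5):
    # Rank all articles by similarity to article `idx` and return the titles of
    # the closest ones, excluding the article itself.
    scores = sorted(enumerate(cosine_sim[idx]), key=lambda x: x[1], reverse=True)[1:topn + 1]
    return df['title'].iloc[[i for i, _ in scores]]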